[Dataset dump of source files from repo 2302_79757062/hrms. Each entry below is a file's code followed by a metadata line: path, language, license, and size in bytes. Original table columns: code (1–1.05M chars), repo_name, path, language (222 classes), license (20 classes), size (int64, 1–1.05M).]
.feedback-section {
.no-feedback {
min-height: 100px;
}
.new-feedback-btn {
gap: 5px;
}
}
.feedback-summary-section {
.rating-summary-numbers {
display: flex;
flex-direction: column;
align-items: center;
border-right: 1px solid var(--gray-100);
.average-rating {
font-size: 2rem;
}
.feedback-count {
margin-top: -0.5rem;
}
}
.rating-progress-bar-section {
padding-bottom: 2rem;
.rating-bar-title {
margin-left: -15px;
}
.rating-progress-bar {
margin-bottom: 4px;
height: 7px;
margin-top: 6px;
}
.progress-bar-cosmetic {
background-color: var(--gray-600);
border-radius: var(--border-radius);
}
}
.ratings-pill {
background-color: var(--gray-100);
padding: 0.5rem 1rem;
border-radius: 66px;
}
}
.feedback-history {
.feedback-content {
border-radius: var(--border-radius);
border: 1px solid var(--border-color);
}
.feedback-content:last-child {
border-bottom: 1px solid var(--border-color);
}
}
[file: hrms/public/scss/feedback.scss | language: SCSS | license: agpl-3.0 | size: 988 bytes | repo: 2302_79757062/hrms]
.node-card {
background: white;
border-radius: 0.5rem;
padding: 0.75rem;
margin-left: 3rem;
width: 18rem;
overflow: hidden;
.btn-edit-node {
display: none;
}
.edit-chart-node {
display: none;
}
.node-edit-icon {
display: none;
}
}
.node-card.exported {
box-shadow: none;
}
.node-image {
width: 3rem;
height: 3rem;
}
.node-name {
font-size: var(--text-lg);
color: var(--text-color);
line-height: 1.72;
}
.node-title {
font-size: 0.75rem;
line-height: 1.35;
}
.node-info {
width: 12.7rem;
}
.node-connections {
font-size: 0.75rem;
line-height: 1.35;
}
.node-card.active {
background: var(--gray-100);
border: 1px solid var(--gray-600);
box-shadow: var(--shadow-md);
border-radius: 0.5rem;
padding: 0.75rem;
width: 18rem;
.btn-edit-node {
display: flex;
background: var(--gray-300);
color: var(--gray-800);
font-size: 0.75rem;
align-items: center;
justify-content: center;
box-shadow: var(--shadow-sm);
gap: 6px;
}
.edit-chart-node {
display: block;
}
.node-edit-icon {
display: block;
}
.node-edit-icon > .icon {
margin-top: -3px;
}
.node-name {
align-items: center;
justify-content: space-between;
margin-bottom: 2px;
width: 12.2rem;
}
}
.node-card.active-path {
background: var(--gray-100);
border: 1px solid var(--gray-300);
box-shadow: var(--shadow-sm);
border-radius: 0.5rem;
padding: 0.75rem;
width: 15rem;
height: 3rem;
.btn-edit-node {
display: none !important;
}
.edit-chart-node {
display: none;
}
.node-edit-icon {
display: none;
}
.node-info {
display: none;
}
.node-title {
display: none;
}
.node-connections {
display: none;
}
.node-name {
font-size: 0.85rem;
line-height: 1.35;
}
.node-image {
width: 1.5rem;
height: 1.5rem;
}
.node-meta {
align-items: baseline;
}
}
.node-card.collapsed {
background: white;
border-radius: 0.5rem;
padding: 0.75rem;
width: 15rem;
height: 3rem;
.btn-edit-node {
display: none !important;
}
.edit-chart-node {
display: none;
}
.node-edit-icon {
display: none;
}
.node-info {
display: none;
}
.node-title {
display: none;
}
.node-connections {
display: none;
}
.node-name {
font-size: 0.85rem;
line-height: 1.35;
}
.node-image {
width: 1.5rem;
height: 1.5rem;
}
.node-meta {
align-items: baseline;
}
}
// horizontal hierarchy tree view
#hierarchy-chart-wrapper {
padding-top: 30px;
#arrows {
margin-top: -80px;
}
}
.hierarchy {
display: flex;
}
.hierarchy li {
list-style-type: none;
}
.child-node {
margin: 0px 0px 16px 0px;
}
.hierarchy,
.hierarchy-mobile {
.level {
margin-right: 8px;
align-items: flex-start;
flex-direction: column;
}
}
#arrows {
position: absolute;
overflow: visible;
}
.active-connector {
stroke: var(--gray-600);
}
.collapsed-connector {
stroke: var(--gray-400);
}
// mobile
.hierarchy-mobile {
display: flex;
flex-direction: column;
align-items: center;
padding-top: 10px;
padding-left: 0px;
}
.hierarchy-mobile li {
list-style-type: none;
display: flex;
flex-direction: column;
align-items: flex-end;
}
.mobile-node {
margin-left: 0;
}
.mobile-node.active-path {
width: 12.25rem;
}
.active-child {
width: 15.5rem;
}
.mobile-node .node-connections {
max-width: 80px;
}
.hierarchy-mobile .node-children {
margin-top: 16px;
}
.root-level .node-card {
margin: 0 0 16px;
}
// node group
.collapsed-level {
margin-bottom: 16px;
width: 18rem;
}
.node-group {
background: white;
border: 1px solid var(--border-color);
box-shadow: var(--shadow-sm);
border-radius: 0.5rem;
padding: 0.75rem;
width: 18rem;
height: 3rem;
overflow: hidden;
align-items: center;
}
.node-group .avatar-group {
margin-left: 0px;
}
.node-group .avatar-extra-count {
background-color: var(--gray-100);
color: var(--gray-500);
}
.node-group .avatar-frame {
width: 1.5rem;
height: 1.5rem;
}
.node-group.collapsed {
width: 5rem;
margin-left: 12px;
}
.sibling-group {
display: flex;
flex-direction: column;
align-items: center;
}
[file: hrms/public/scss/hierarchy_chart.scss | language: SCSS | license: agpl-3.0 | size: 4,045 bytes | repo: 2302_79757062/hrms]
@import "./feedback";
@import "./circular_progress";
@import "./hierarchy_chart";
[file: hrms/public/scss/hrms.bundle.scss | language: SCSS | license: agpl-3.0 | size: 82 bytes | repo: 2302_79757062/hrms]
# Copyright (c) 2022, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import frappe
from frappe.custom.doctype.custom_field.custom_field import create_custom_fields
from hrms.setup import delete_custom_fields
def setup():
make_custom_fields()
add_custom_roles_for_reports()
create_gratuity_rule_for_india()
def uninstall():
custom_fields = get_custom_fields()
delete_custom_fields(custom_fields)
def make_custom_fields(update=True):
custom_fields = get_custom_fields()
create_custom_fields(custom_fields, update=update)
def get_custom_fields():
return {
"Salary Component": [
{
"fieldname": "component_type",
"label": "Component Type",
"fieldtype": "Select",
"insert_after": "description",
"options": (
"\nProvident Fund\nAdditional Provident Fund\nProvident Fund Loan\nProfessional Tax"
),
"depends_on": 'eval:doc.type == "Deduction"',
"translatable": 0,
},
],
"Employee": [
{
"fieldname": "bank_cb",
"fieldtype": "Column Break",
"insert_after": "bank_ac_no",
},
{
"fieldname": "ifsc_code",
"label": "IFSC Code",
"fieldtype": "Data",
"insert_after": "bank_cb",
"print_hide": 1,
"depends_on": 'eval:doc.salary_mode == "Bank"',
"translatable": 0,
},
{
"fieldname": "pan_number",
"label": "PAN Number",
"fieldtype": "Data",
"insert_after": "payroll_cost_center",
"print_hide": 1,
"translatable": 0,
},
{
"fieldname": "micr_code",
"label": "MICR Code",
"fieldtype": "Data",
"insert_after": "ifsc_code",
"print_hide": 1,
"depends_on": 'eval:doc.salary_mode == "Bank"',
"translatable": 0,
},
{
"fieldname": "provident_fund_account",
"label": "Provident Fund Account",
"fieldtype": "Data",
"insert_after": "pan_number",
"translatable": 0,
},
],
"Company": [
{
"fieldname": "hra_section",
"label": "HRA Settings",
"fieldtype": "Section Break",
"insert_after": "asset_received_but_not_billed",
"collapsible": 1,
},
{
"fieldname": "basic_component",
"label": "Basic Component",
"fieldtype": "Link",
"options": "Salary Component",
"insert_after": "hra_section",
},
{
"fieldname": "hra_component",
"label": "HRA Component",
"fieldtype": "Link",
"options": "Salary Component",
"insert_after": "basic_component",
},
{
"fieldname": "hra_column_break",
"fieldtype": "Column Break",
"insert_after": "hra_component",
},
{
"fieldname": "arrear_component",
"label": "Arrear Component",
"fieldtype": "Link",
"options": "Salary Component",
"insert_after": "hra_column_break",
},
],
"Employee Tax Exemption Declaration": [
{
"fieldname": "hra_section",
"label": "HRA Exemption",
"fieldtype": "Section Break",
"insert_after": "declarations",
},
{
"fieldname": "monthly_house_rent",
"label": "Monthly House Rent",
"fieldtype": "Currency",
"insert_after": "hra_section",
},
{
"fieldname": "rented_in_metro_city",
"label": "Rented in Metro City",
"fieldtype": "Check",
"insert_after": "monthly_house_rent",
"depends_on": "monthly_house_rent",
},
{
"fieldname": "salary_structure_hra",
"label": "HRA as per Salary Structure",
"fieldtype": "Currency",
"insert_after": "rented_in_metro_city",
"read_only": 1,
"depends_on": "monthly_house_rent",
},
{
"fieldname": "hra_column_break",
"fieldtype": "Column Break",
"insert_after": "salary_structure_hra",
"depends_on": "monthly_house_rent",
},
{
"fieldname": "annual_hra_exemption",
"label": "Annual HRA Exemption",
"fieldtype": "Currency",
"insert_after": "hra_column_break",
"read_only": 1,
"depends_on": "monthly_house_rent",
},
{
"fieldname": "monthly_hra_exemption",
"label": "Monthly HRA Exemption",
"fieldtype": "Currency",
"insert_after": "annual_hra_exemption",
"read_only": 1,
"depends_on": "monthly_house_rent",
},
],
"Employee Tax Exemption Proof Submission": [
{
"fieldname": "hra_section",
"label": "HRA Exemption",
"fieldtype": "Section Break",
"insert_after": "tax_exemption_proofs",
},
{
"fieldname": "house_rent_payment_amount",
"label": "House Rent Payment Amount",
"fieldtype": "Currency",
"insert_after": "hra_section",
},
{
"fieldname": "rented_in_metro_city",
"label": "Rented in Metro City",
"fieldtype": "Check",
"insert_after": "house_rent_payment_amount",
"depends_on": "house_rent_payment_amount",
},
{
"fieldname": "rented_from_date",
"label": "Rented From Date",
"fieldtype": "Date",
"insert_after": "rented_in_metro_city",
"depends_on": "house_rent_payment_amount",
},
{
"fieldname": "rented_to_date",
"label": "Rented To Date",
"fieldtype": "Date",
"insert_after": "rented_from_date",
"depends_on": "house_rent_payment_amount",
},
{
"fieldname": "hra_column_break",
"fieldtype": "Column Break",
"insert_after": "rented_to_date",
"depends_on": "house_rent_payment_amount",
},
{
"fieldname": "monthly_house_rent",
"label": "Monthly House Rent",
"fieldtype": "Currency",
"insert_after": "hra_column_break",
"read_only": 1,
"depends_on": "house_rent_payment_amount",
},
{
"fieldname": "monthly_hra_exemption",
"label": "Monthly Eligible Amount",
"fieldtype": "Currency",
"insert_after": "monthly_house_rent",
"read_only": 1,
"depends_on": "house_rent_payment_amount",
},
{
"fieldname": "total_eligible_hra_exemption",
"label": "Total Eligible HRA Exemption",
"fieldtype": "Currency",
"insert_after": "monthly_hra_exemption",
"read_only": 1,
"depends_on": "house_rent_payment_amount",
},
],
}
def add_custom_roles_for_reports():
for report_name in (
"Professional Tax Deductions",
"Provident Fund Deductions",
"Income Tax Deductions",
):
if not frappe.db.get_value("Custom Role", dict(report=report_name)):
doc = frappe.new_doc("Custom Role")
doc.update(
dict(
report=report_name,
roles=[dict(role="HR User"), dict(role="HR Manager"), dict(role="Employee")],
)
).insert(ignore_permissions=True)
def create_gratuity_rule_for_india():
if not frappe.db.exists("DocType", "Gratuity Rule"):
return
if frappe.db.exists("Gratuity Rule", "Indian Standard Gratuity Rule"):
return
rule = frappe.new_doc("Gratuity Rule")
rule.update(
{
"name": "Indian Standard Gratuity Rule",
"calculate_gratuity_amount_based_on": "Current Slab",
"work_experience_calculation_method": "Round Off Work Experience",
"minimum_year_for_gratuity": 5,
"gratuity_rule_slabs": [
{
"from_year": 0,
"to_year": 0,
"fraction_of_applicable_earnings": 15 / 26,
}
],
}
)
rule.insert(ignore_permissions=True, ignore_mandatory=True)
[file: hrms/regional/india/setup.py | language: Python | license: agpl-3.0 | size: 7,052 bytes | repo: 2302_79757062/hrms]
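The custom fields above are plain dicts in the shape `{"DocType": [field_def, ...]}` that `create_custom_fields` consumes and `delete_custom_fields` reverses. A minimal sketch of how the setup/uninstall pair stays symmetric (`reinstall_india_fields` is a hypothetical helper, not part of the file above):

```python
# Sketch: setup() and uninstall() operate on the same field definitions,
# so removing the app deletes exactly the fields it created.
from hrms.regional.india.setup import get_custom_fields, make_custom_fields
from hrms.setup import delete_custom_fields


def reinstall_india_fields():
    # hypothetical helper: drop and recreate the India-specific fields
    delete_custom_fields(get_custom_fields())
    make_custom_fields(update=True)  # update=True keeps re-runs idempotent
```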
import math
import frappe
from frappe import _
from frappe.utils import add_days, date_diff, flt, get_link_to_form, month_diff
from hrms.hr.utils import get_salary_assignments
from hrms.payroll.doctype.salary_structure.salary_structure import make_salary_slip
def calculate_annual_eligible_hra_exemption(doc):
basic_component, hra_component = frappe.db.get_value(
"Company", doc.company, ["basic_component", "hra_component"]
)
if not (basic_component and hra_component):
frappe.throw(
_("Please set Basic and HRA component in Company {0}").format(
get_link_to_form("Company", doc.company)
)
)
annual_exemption = monthly_exemption = hra_amount = basic_amount = 0
if hra_component and basic_component:
assignments = get_salary_assignments(doc.employee, doc.payroll_period)
if not assignments and doc.docstatus == 1:
frappe.throw(_("Salary Structure must be submitted before submission of {0}").format(doc.doctype))
period_start_date = frappe.db.get_value("Payroll Period", doc.payroll_period, "start_date")
assignment_dates = []
for assignment in assignments:
# if assignment is before payroll period, use period start date to get the correct days
assignment.from_date = max(assignment.from_date, period_start_date)
assignment_dates.append(assignment.from_date)
for idx, assignment in enumerate(assignments):
if has_hra_component(assignment.salary_structure, hra_component):
basic_salary_amt, hra_salary_amt = get_component_amt_from_salary_slip(
doc.employee,
assignment.salary_structure,
basic_component,
hra_component,
assignment.from_date,
)
to_date = get_end_date_for_assignment(assignment_dates, idx, doc.payroll_period)
frequency = frappe.get_value(
"Salary Structure", assignment.salary_structure, "payroll_frequency"
)
basic_amount += get_component_pay(frequency, basic_salary_amt, assignment.from_date, to_date)
hra_amount += get_component_pay(frequency, hra_salary_amt, assignment.from_date, to_date)
if hra_amount:
if doc.monthly_house_rent:
annual_exemption = calculate_hra_exemption(
assignment.salary_structure,
basic_amount,
hra_amount,
doc.monthly_house_rent,
doc.rented_in_metro_city,
)
if annual_exemption > 0:
monthly_exemption = annual_exemption / 12
else:
annual_exemption = 0
return frappe._dict(
{
"hra_amount": hra_amount,
"annual_exemption": annual_exemption,
"monthly_exemption": monthly_exemption,
}
)
def has_hra_component(salary_structure, hra_component):
return frappe.db.exists(
"Salary Detail",
{
"parent": salary_structure,
"salary_component": hra_component,
"parentfield": "earnings",
"parenttype": "Salary Structure",
},
)
def get_end_date_for_assignment(assignment_dates, idx, payroll_period):
end_date = None
try:
end_date = assignment_dates[idx + 1]
end_date = add_days(end_date, -1)
except IndexError:
pass
if not end_date:
end_date = frappe.db.get_value("Payroll Period", payroll_period, "end_date")
return end_date
def get_component_amt_from_salary_slip(employee, salary_structure, basic_component, hra_component, from_date):
salary_slip = make_salary_slip(
salary_structure,
employee=employee,
for_preview=1,
ignore_permissions=True,
posting_date=from_date,
)
basic_amt, hra_amt = 0, 0
for earning in salary_slip.earnings:
if earning.salary_component == basic_component:
basic_amt = earning.amount
elif earning.salary_component == hra_component:
hra_amt = earning.amount
if basic_amt and hra_amt:
return basic_amt, hra_amt
return basic_amt, hra_amt
def calculate_hra_exemption(
salary_structure, annual_basic, annual_hra, monthly_house_rent, rented_in_metro_city
):
# TODO make this configurable
exemptions = []
# case 1: The actual amount allotted by the employer as the HRA.
exemptions.append(annual_hra)
# case 2: Actual rent paid less 10% of the basic salary.
actual_annual_rent = monthly_house_rent * 12
exemptions.append(flt(actual_annual_rent) - flt(annual_basic * 0.1))
# case 3: 50% of the basic salary, if the employee is staying in a metro city (40% for a non-metro city).
exemptions.append(annual_basic * 0.5 if rented_in_metro_city else annual_basic * 0.4)
# return minimum of 3 cases
return min(exemptions)
def get_component_pay(frequency, amount, from_date, to_date):
days = date_diff(to_date, from_date) + 1
if frequency == "Daily":
return amount * days
elif frequency == "Weekly":
return amount * math.floor(days / 7)
elif frequency == "Fortnightly":
return amount * math.floor(days / 14)
elif frequency == "Monthly":
return amount * month_diff(to_date, from_date)
elif frequency == "Bimonthly":
return amount * (month_diff(to_date, from_date) / 2)
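# Illustrative note (not in the original file): frappe.utils.month_diff is
# inclusive, so a "Monthly" assignment effective 2023-04-01 whose successor
# starts 2023-10-01 gets to_date = 2023-09-30 and contributes
# amount * month_diff("2023-09-30", "2023-04-01") = amount * 6.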
def validate_house_rent_dates(doc):
if not doc.rented_to_date or not doc.rented_from_date:
frappe.throw(_("House rented dates required for exemption calculation"))
if date_diff(doc.rented_to_date, doc.rented_from_date) < 14:
frappe.throw(_("House rented dates should be atleast 15 days apart"))
proofs = frappe.db.sql(
"""
select name
from `tabEmployee Tax Exemption Proof Submission`
where
docstatus=1 and employee=%(employee)s and payroll_period=%(payroll_period)s
and (rented_from_date between %(from_date)s and %(to_date)s or rented_to_date between %(from_date)s and %(to_date)s)
""",
{
"employee": doc.employee,
"payroll_period": doc.payroll_period,
"from_date": doc.rented_from_date,
"to_date": doc.rented_to_date,
},
)
if proofs:
frappe.throw(_("House rent paid days overlapping with {0}").format(proofs[0][0]))
def calculate_hra_exemption_for_period(doc):
monthly_rent, eligible_hra = 0, 0
if doc.house_rent_payment_amount:
validate_house_rent_dates(doc)
# TODO receive rented months or validate dates are start and end of months?
# Calc monthly rent, round to nearest .5
factor = flt(date_diff(doc.rented_to_date, doc.rented_from_date) + 1) / 30
factor = round(factor * 2) / 2
monthly_rent = doc.house_rent_payment_amount / factor
# update field used by calculate_annual_eligible_hra_exemption
doc.monthly_house_rent = monthly_rent
exemptions = calculate_annual_eligible_hra_exemption(doc)
if exemptions["monthly_exemption"]:
# calc total exemption amount
eligible_hra = exemptions["monthly_exemption"] * factor
exemptions["monthly_house_rent"] = monthly_rent
exemptions["total_eligible_hra_exemption"] = eligible_hra
return exemptions
[file: hrms/regional/india/utils.py | language: Python | license: agpl-3.0 | size: 6,535 bytes | repo: 2302_79757062/hrms]
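To make the three-case minimum in `calculate_hra_exemption` concrete, here is a short worked example with illustrative figures:

```python
# Worked example of the HRA exemption rule above: annual basic of 600,000,
# annual HRA of 240,000, monthly rent of 25,000, metro city.
annual_basic = 600_000
annual_hra = 240_000
actual_annual_rent = 25_000 * 12                  # 300,000

case_1 = annual_hra                               # HRA actually received: 240,000
case_2 = actual_annual_rent - annual_basic * 0.1  # rent less 10% of basic: 240,000
case_3 = annual_basic * 0.5                       # 50% of basic (metro): 300,000

annual_exemption = min(case_1, case_2, case_3)    # 240,000
monthly_exemption = annual_exemption / 12         # 20,000.0
```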
# Copyright (c) 2022, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import frappe
def setup():
create_gratuity_rules_for_uae()
def create_gratuity_rules_for_uae():
docs = get_gratuity_rules()
for d in docs:
doc = frappe.get_doc(d)
doc.insert(ignore_if_duplicate=True, ignore_permissions=True, ignore_mandatory=True)
def get_gratuity_rules():
return [
{
"doctype": "Gratuity Rule",
"name": "Rule Under Limited Contract (UAE)",
"calculate_gratuity_amount_based_on": "Sum of all previous slabs",
"work_experience_calculation_method": "Take Exact Completed Years",
"minimum_year_for_gratuity": 1,
"gratuity_rule_slabs": [
{"from_year": 0, "to_year": 1, "fraction_of_applicable_earnings": 0},
{"from_year": 1, "to_year": 5, "fraction_of_applicable_earnings": 21 / 30},
{"from_year": 5, "to_year": 0, "fraction_of_applicable_earnings": 1},
],
},
{
"doctype": "Gratuity Rule",
"name": "Rule Under Unlimited Contract on termination (UAE)",
"calculate_gratuity_amount_based_on": "Current Slab",
"work_experience_calculation_method": "Take Exact Completed Years",
"minimum_year_for_gratuity": 1,
"gratuity_rule_slabs": [
{"from_year": 0, "to_year": 1, "fraction_of_applicable_earnings": 0},
{"from_year": 1, "to_year": 5, "fraction_of_applicable_earnings": 21 / 30},
{"from_year": 5, "to_year": 0, "fraction_of_applicable_earnings": 1},
],
},
{
"doctype": "Gratuity Rule",
"name": "Rule Under Unlimited Contract on resignation (UAE)",
"calculate_gratuity_amount_based_on": "Current Slab",
"work_experience_calculation_method": "Take Exact Completed Years",
"minimum_year_for_gratuity": 1,
"gratuity_rule_slabs": [
{"from_year": 0, "to_year": 1, "fraction_of_applicable_earnings": 0},
{"from_year": 1, "to_year": 3, "fraction_of_applicable_earnings": 1 / 3 * 21 / 30},
{"from_year": 3, "to_year": 5, "fraction_of_applicable_earnings": 2 / 3 * 21 / 30},
{"from_year": 5, "to_year": 0, "fraction_of_applicable_earnings": 21 / 30},
],
},
]
[file: hrms/regional/united_arab_emirates/setup.py | language: Python | license: agpl-3.0 | size: 2,116 bytes | repo: 2302_79757062/hrms]
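The limited-contract rule above grants nothing for the first year, 21/30 of a month's applicable earnings per year for years 1–5, and a full month per year beyond 5, with "Sum of all previous slabs" accumulating across slabs. A quick arithmetic check with illustrative figures (the actual calculation is performed by the Gratuity Rule engine):

```python
# Sanity check of the limited-contract slabs for 7 completed years at a
# monthly applicable earning of 9,000 (illustrative numbers).
monthly_earnings = 9_000
years = 7

slab_0_1 = 0 * monthly_earnings * 1                            # years 0-1: nothing
slab_1_5 = (21 / 30) * monthly_earnings * (min(years, 5) - 1)  # 4 yrs * 6,300 = 25,200
slab_5_up = 1.0 * monthly_earnings * max(years - 5, 0)         # 2 yrs * 9,000 = 18,000

print(slab_0_1 + slab_1_5 + slab_5_up)  # 43200.0
```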
import os
import frappe
from frappe.custom.doctype.custom_field.custom_field import create_custom_fields
from frappe.desk.page.setup_wizard.install_fixtures import (
_, # NOTE: this is not the real translation function
)
from frappe.desk.page.setup_wizard.setup_wizard import make_records
from frappe.installer import update_site_config
from hrms.overrides.company import delete_company_fixtures
def after_install():
create_custom_fields(get_custom_fields(), ignore_validate=True)
create_salary_slip_loan_fields()
make_fixtures()
setup_notifications()
update_hr_defaults()
add_non_standard_user_types()
set_single_defaults()
create_default_role_profiles()
run_post_install_patches()
def before_uninstall():
delete_custom_fields(get_custom_fields())
delete_custom_fields(get_salary_slip_loan_fields())
delete_company_fixtures()
def after_app_install(app_name):
"""Set up loan integration with payroll"""
if app_name != "lending":
return
print("Updating payroll setup for loans")
create_custom_fields(get_salary_slip_loan_fields(), ignore_validate=True)
add_lending_docperms_to_ess()
def before_app_uninstall(app_name):
"""Clean up loan integration with payroll"""
if app_name != "lending":
return
print("Updating payroll setup for loans")
delete_custom_fields(get_salary_slip_loan_fields())
remove_lending_docperms_from_ess()
def get_custom_fields():
"""HR specific custom fields that need to be added to the masters in ERPNext"""
return {
"Company": [
{
"fieldname": "hr_and_payroll_tab",
"fieldtype": "Tab Break",
"label": _("HR & Payroll"),
"insert_after": "credit_limit",
},
{
"fieldname": "hr_settings_section",
"fieldtype": "Section Break",
"label": _("HR & Payroll Settings"),
"insert_after": "hr_and_payroll_tab",
},
{
"depends_on": "eval:!doc.__islocal",
"fieldname": "default_expense_claim_payable_account",
"fieldtype": "Link",
"ignore_user_permissions": 1,
"label": _("Default Expense Claim Payable Account"),
"no_copy": 1,
"options": "Account",
"insert_after": "hr_settings_section",
},
{
"fieldname": "default_employee_advance_account",
"fieldtype": "Link",
"label": _("Default Employee Advance Account"),
"no_copy": 1,
"options": "Account",
"insert_after": "default_expense_claim_payable_account",
},
{
"fieldname": "column_break_10",
"fieldtype": "Column Break",
"insert_after": "default_employee_advance_account",
},
{
"depends_on": "eval:!doc.__islocal",
"fieldname": "default_payroll_payable_account",
"fieldtype": "Link",
"ignore_user_permissions": 1,
"label": _("Default Payroll Payable Account"),
"no_copy": 1,
"options": "Account",
"insert_after": "column_break_10",
},
],
"Department": [
{
"fieldname": "section_break_4",
"fieldtype": "Section Break",
"insert_after": "disabled",
},
{
"fieldname": "payroll_cost_center",
"fieldtype": "Link",
"label": _("Payroll Cost Center"),
"options": "Cost Center",
"insert_after": "section_break_4",
},
{
"fieldname": "column_break_9",
"fieldtype": "Column Break",
"insert_after": "payroll_cost_center",
},
{
"description": _("Days for which Holidays are blocked for this department."),
"fieldname": "leave_block_list",
"fieldtype": "Link",
"in_list_view": 1,
"label": _("Leave Block List"),
"options": "Leave Block List",
"insert_after": "column_break_9",
},
{
"description": _("The first Approver in the list will be set as the default Approver."),
"fieldname": "approvers",
"fieldtype": "Section Break",
"label": _("Approvers"),
"insert_after": "leave_block_list",
},
{
"fieldname": "shift_request_approver",
"fieldtype": "Table",
"label": _("Shift Request Approver"),
"options": "Department Approver",
"insert_after": "approvers",
},
{
"fieldname": "leave_approvers",
"fieldtype": "Table",
"label": _("Leave Approver"),
"options": "Department Approver",
"insert_after": "shift_request_approver",
},
{
"fieldname": "expense_approvers",
"fieldtype": "Table",
"label": _("Expense Approver"),
"options": "Department Approver",
"insert_after": "leave_approvers",
},
],
"Designation": [
{
"fieldname": "appraisal_template",
"fieldtype": "Link",
"label": _("Appraisal Template"),
"options": "Appraisal Template",
"insert_after": "description",
"allow_in_quick_entry": 1,
},
{
"fieldname": "required_skills_section",
"fieldtype": "Section Break",
"label": _("Required Skills"),
"insert_after": "appraisal_template",
},
{
"fieldname": "skills",
"fieldtype": "Table",
"label": _("Skills"),
"options": "Designation Skill",
"insert_after": "required_skills_section",
},
],
"Employee": [
{
"fieldname": "employment_type",
"fieldtype": "Link",
"ignore_user_permissions": 1,
"label": _("Employment Type"),
"options": "Employment Type",
"insert_after": "department",
},
{
"fieldname": "job_applicant",
"fieldtype": "Link",
"label": _("Job Applicant"),
"options": "Job Applicant",
"insert_after": "employment_details",
},
{
"fieldname": "grade",
"fieldtype": "Link",
"label": _("Grade"),
"options": "Employee Grade",
"insert_after": "branch",
},
{
"fieldname": "default_shift",
"fieldtype": "Link",
"label": _("Default Shift"),
"options": "Shift Type",
"insert_after": "holiday_list",
},
{
"collapsible": 1,
"fieldname": "health_insurance_section",
"fieldtype": "Section Break",
"label": _("Health Insurance"),
"insert_after": "health_details",
},
{
"fieldname": "health_insurance_provider",
"fieldtype": "Link",
"label": _("Health Insurance Provider"),
"options": "Employee Health Insurance",
"insert_after": "health_insurance_section",
},
{
"depends_on": "eval:doc.health_insurance_provider",
"fieldname": "health_insurance_no",
"fieldtype": "Data",
"label": _("Health Insurance No"),
"insert_after": "health_insurance_provider",
},
{
"fieldname": "approvers_section",
"fieldtype": "Section Break",
"label": _("Approvers"),
"insert_after": "default_shift",
},
{
"fieldname": "expense_approver",
"fieldtype": "Link",
"label": _("Expense Approver"),
"options": "User",
"insert_after": "approvers_section",
},
{
"fieldname": "leave_approver",
"fieldtype": "Link",
"label": _("Leave Approver"),
"options": "User",
"insert_after": "expense_approver",
},
{
"fieldname": "column_break_45",
"fieldtype": "Column Break",
"insert_after": "leave_approver",
},
{
"fieldname": "shift_request_approver",
"fieldtype": "Link",
"label": _("Shift Request Approver"),
"options": "User",
"insert_after": "column_break_45",
},
{
"fieldname": "salary_cb",
"fieldtype": "Column Break",
"insert_after": "salary_mode",
},
{
"fetch_from": "department.payroll_cost_center",
"fetch_if_empty": 1,
"fieldname": "payroll_cost_center",
"fieldtype": "Link",
"label": _("Payroll Cost Center"),
"options": "Cost Center",
"insert_after": "salary_cb",
},
],
"Project": [
{
"fieldname": "total_expense_claim",
"fieldtype": "Currency",
"label": _("Total Expense Claim (via Expense Claims)"),
"read_only": 1,
"insert_after": "total_costing_amount",
},
],
"Task": [
{
"fieldname": "total_expense_claim",
"fieldtype": "Currency",
"label": _("Total Expense Claim (via Expense Claim)"),
"options": "Company:company:default_currency",
"read_only": 1,
"insert_after": "total_costing_amount",
},
],
"Timesheet": [
{
"fieldname": "salary_slip",
"fieldtype": "Link",
"label": _("Salary Slip"),
"no_copy": 1,
"options": "Salary Slip",
"print_hide": 1,
"read_only": 1,
"insert_after": "column_break_3",
},
],
"Terms and Conditions": [
{
"default": "1",
"fieldname": "hr",
"fieldtype": "Check",
"label": _("HR"),
"insert_after": "buying",
},
],
}
def make_fixtures():
records = [
# expense claim type
{"doctype": "Expense Claim Type", "name": _("Calls"), "expense_type": _("Calls")},
{"doctype": "Expense Claim Type", "name": _("Food"), "expense_type": _("Food")},
{"doctype": "Expense Claim Type", "name": _("Medical"), "expense_type": _("Medical")},
{"doctype": "Expense Claim Type", "name": _("Others"), "expense_type": _("Others")},
{"doctype": "Expense Claim Type", "name": _("Travel"), "expense_type": _("Travel")},
# vehicle service item
{"doctype": "Vehicle Service Item", "service_item": "Brake Oil"},
{"doctype": "Vehicle Service Item", "service_item": "Brake Pad"},
{"doctype": "Vehicle Service Item", "service_item": "Clutch Plate"},
{"doctype": "Vehicle Service Item", "service_item": "Engine Oil"},
{"doctype": "Vehicle Service Item", "service_item": "Oil Change"},
{"doctype": "Vehicle Service Item", "service_item": "Wheels"},
# leave type
{
"doctype": "Leave Type",
"leave_type_name": _("Casual Leave"),
"name": _("Casual Leave"),
"allow_encashment": 1,
"is_carry_forward": 1,
"max_continuous_days_allowed": "3",
"include_holiday": 1,
},
{
"doctype": "Leave Type",
"leave_type_name": _("Compensatory Off"),
"name": _("Compensatory Off"),
"allow_encashment": 0,
"is_carry_forward": 0,
"include_holiday": 1,
"is_compensatory": 1,
},
{
"doctype": "Leave Type",
"leave_type_name": _("Sick Leave"),
"name": _("Sick Leave"),
"allow_encashment": 0,
"is_carry_forward": 0,
"include_holiday": 1,
},
{
"doctype": "Leave Type",
"leave_type_name": _("Privilege Leave"),
"name": _("Privilege Leave"),
"allow_encashment": 0,
"is_carry_forward": 0,
"include_holiday": 1,
},
{
"doctype": "Leave Type",
"leave_type_name": _("Leave Without Pay"),
"name": _("Leave Without Pay"),
"allow_encashment": 0,
"is_carry_forward": 0,
"is_lwp": 1,
"include_holiday": 1,
},
# Employment Type
{"doctype": "Employment Type", "employee_type_name": _("Full-time")},
{"doctype": "Employment Type", "employee_type_name": _("Part-time")},
{"doctype": "Employment Type", "employee_type_name": _("Probation")},
{"doctype": "Employment Type", "employee_type_name": _("Contract")},
{"doctype": "Employment Type", "employee_type_name": _("Commission")},
{"doctype": "Employment Type", "employee_type_name": _("Piecework")},
{"doctype": "Employment Type", "employee_type_name": _("Intern")},
{"doctype": "Employment Type", "employee_type_name": _("Apprentice")},
# Job Applicant Source
{"doctype": "Job Applicant Source", "source_name": _("Website Listing")},
{"doctype": "Job Applicant Source", "source_name": _("Walk In")},
{"doctype": "Job Applicant Source", "source_name": _("Employee Referral")},
{"doctype": "Job Applicant Source", "source_name": _("Campaign")},
# Offer Term
{"doctype": "Offer Term", "offer_term": _("Date of Joining")},
{"doctype": "Offer Term", "offer_term": _("Annual Salary")},
{"doctype": "Offer Term", "offer_term": _("Probationary Period")},
{"doctype": "Offer Term", "offer_term": _("Employee Benefits")},
{"doctype": "Offer Term", "offer_term": _("Working Hours")},
{"doctype": "Offer Term", "offer_term": _("Stock Options")},
{"doctype": "Offer Term", "offer_term": _("Department")},
{"doctype": "Offer Term", "offer_term": _("Job Description")},
{"doctype": "Offer Term", "offer_term": _("Responsibilities")},
{"doctype": "Offer Term", "offer_term": _("Leaves per Year")},
{"doctype": "Offer Term", "offer_term": _("Notice Period")},
{"doctype": "Offer Term", "offer_term": _("Incentives")},
# Email Account
{"doctype": "Email Account", "email_id": "jobs@example.com", "append_to": "Job Applicant"},
]
make_records(records)
def setup_notifications():
base_path = frappe.get_app_path("hrms", "hr", "doctype")
# Leave Application
response = frappe.read_file(
os.path.join(base_path, "leave_application/leave_application_email_template.html")
)
records = [
{
"doctype": "Email Template",
"name": _("Leave Approval Notification"),
"response": response,
"subject": _("Leave Approval Notification"),
"owner": frappe.session.user,
}
]
records += [
{
"doctype": "Email Template",
"name": _("Leave Status Notification"),
"response": response,
"subject": _("Leave Status Notification"),
"owner": frappe.session.user,
}
]
# Interview
response = frappe.read_file(
os.path.join(base_path, "interview/interview_reminder_notification_template.html")
)
records += [
{
"doctype": "Email Template",
"name": _("Interview Reminder"),
"response": response,
"subject": _("Interview Reminder"),
"owner": frappe.session.user,
}
]
response = frappe.read_file(
os.path.join(base_path, "interview/interview_feedback_reminder_template.html")
)
records += [
{
"doctype": "Email Template",
"name": _("Interview Feedback Reminder"),
"response": response,
"subject": _("Interview Feedback Reminder"),
"owner": frappe.session.user,
}
]
# Exit Interview
response = frappe.read_file(
os.path.join(base_path, "exit_interview/exit_questionnaire_notification_template.html")
)
records += [
{
"doctype": "Email Template",
"name": _("Exit Questionnaire Notification"),
"response": response,
"subject": _("Exit Questionnaire Notification"),
"owner": frappe.session.user,
}
]
make_records(records)
def update_hr_defaults():
hr_settings = frappe.get_doc("HR Settings")
hr_settings.emp_created_by = "Naming Series"
hr_settings.leave_approval_notification_template = _("Leave Approval Notification")
hr_settings.leave_status_notification_template = _("Leave Status Notification")
hr_settings.send_interview_reminder = 1
hr_settings.interview_reminder_template = _("Interview Reminder")
hr_settings.remind_before = "00:15:00"
hr_settings.send_interview_feedback_reminder = 1
hr_settings.feedback_reminder_notification_template = _("Interview Feedback Reminder")
hr_settings.exit_questionnaire_notification_template = _("Exit Questionnaire Notification")
hr_settings.save()
def set_single_defaults():
for dt in ("HR Settings", "Payroll Settings"):
default_values = frappe.get_all(
"DocField",
filters={"parent": dt},
fields=["fieldname", "default"],
as_list=True,
)
if default_values:
try:
doc = frappe.get_doc(dt, dt)
for fieldname, value in default_values:
doc.set(fieldname, value)
doc.flags.ignore_mandatory = True
doc.save()
except frappe.ValidationError:
pass
def create_default_role_profiles():
for role_profile_name, roles in DEFAULT_ROLE_PROFILES.items():
if frappe.db.exists("Role Profile", role_profile_name):
continue
role_profile = frappe.new_doc("Role Profile")
role_profile.role_profile = role_profile_name
for role in roles:
role_profile.append("roles", {"role": role})
role_profile.insert(ignore_permissions=True)
def get_post_install_patches():
return (
"erpnext.patches.v13_0.move_tax_slabs_from_payroll_period_to_income_tax_slab",
"erpnext.patches.v13_0.move_doctype_reports_and_notification_from_hr_to_payroll",
"erpnext.patches.v13_0.move_payroll_setting_separately_from_hr_settings",
"erpnext.patches.v13_0.update_start_end_date_for_old_shift_assignment",
"erpnext.patches.v13_0.updates_for_multi_currency_payroll",
"erpnext.patches.v13_0.update_reason_for_resignation_in_employee",
"erpnext.patches.v13_0.set_company_in_leave_ledger_entry",
"erpnext.patches.v13_0.rename_stop_to_send_birthday_reminders",
"erpnext.patches.v13_0.set_training_event_attendance",
"erpnext.patches.v14_0.set_payroll_cost_centers",
"erpnext.patches.v13_0.update_employee_advance_status",
"erpnext.patches.v13_0.update_expense_claim_status_for_paid_advances",
"erpnext.patches.v14_0.delete_employee_transfer_property_doctype",
"erpnext.patches.v13_0.set_payroll_entry_status",
# HRMS
"create_country_fixtures",
"update_allocate_on_in_leave_type",
"update_performance_module_changes",
)
def run_post_install_patches():
print("\nPatching Existing Data...")
POST_INSTALL_PATCHES = get_post_install_patches()
frappe.flags.in_patch = True
try:
for patch in POST_INSTALL_PATCHES:
patch_name = patch.split(".")[-1]
if not patch_name:
continue
frappe.get_attr(f"hrms.patches.post_install.{patch_name}.execute")()
finally:
frappe.flags.in_patch = False
# LENDING APP SETUP & CLEANUP
def create_salary_slip_loan_fields():
if "lending" in frappe.get_installed_apps():
create_custom_fields(get_salary_slip_loan_fields(), ignore_validate=True)
def add_lending_docperms_to_ess():
doc = frappe.get_doc("User Type", "Employee Self Service")
loan_docperms = get_lending_docperms_for_ess()
append_docperms_to_user_type(loan_docperms, doc)
doc.flags.ignore_links = True
doc.save(ignore_permissions=True)
def remove_lending_docperms_from_ess():
doc = frappe.get_doc("User Type", "Employee Self Service")
loan_docperms = get_lending_docperms_for_ess()
for row in list(doc.user_doctypes):
if row.document_type in loan_docperms:
doc.user_doctypes.remove(row)
doc.save(ignore_permissions=True)
# ESS USER TYPE SETUP & CLEANUP
def add_non_standard_user_types():
user_types = get_user_types_data()
update_user_type_doctype_limit(user_types)
for user_type, data in user_types.items():
create_custom_role(data)
create_user_type(user_type, data)
def update_user_type_doctype_limit(user_types=None):
if not user_types:
user_types = get_user_types_data()
user_type_limit = {}
for user_type, __ in user_types.items():
user_type_limit.setdefault(frappe.scrub(user_type), 40)
update_site_config("user_type_doctype_limit", user_type_limit)
def get_user_types_data():
return {
"Employee Self Service": {
"role": "Employee Self Service",
"apply_user_permission_on": "Employee",
"user_id_field": "user_id",
"doctypes": {
# masters
"Holiday List": ["read"],
"Employee": ["read", "write"],
"Company": ["read"],
# payroll
"Salary Slip": ["read"],
"Employee Benefit Application": ["read", "write", "create", "delete"],
# expenses
"Expense Claim": ["read", "write", "create", "delete"],
"Expense Claim Type": ["read"],
"Employee Advance": ["read", "write", "create", "delete"],
# leave and attendance
"Leave Application": ["read", "write", "create", "delete"],
"Attendance Request": ["read", "write", "create", "delete"],
"Compensatory Leave Request": ["read", "write", "create", "delete"],
# tax
"Employee Tax Exemption Declaration": ["read", "write", "create", "delete"],
"Employee Tax Exemption Proof Submission": ["read", "write", "create", "delete"],
# projects
"Timesheet": ["read", "write", "create", "delete", "submit", "cancel", "amend"],
# trainings
"Training Program": ["read"],
"Training Feedback": ["read", "write", "create", "delete", "submit", "cancel", "amend"],
# shifts
"Employee Checkin": ["read"],
"Shift Request": ["read", "write", "create", "delete", "submit", "cancel", "amend"],
# misc
"Employee Grievance": ["read", "write", "create", "delete"],
"Employee Referral": ["read", "write", "create", "delete"],
"Travel Request": ["read", "write", "create", "delete"],
},
}
}
def get_lending_docperms_for_ess():
return {
"Loan": ["read"],
"Loan Application": ["read", "write", "create", "delete", "submit"],
"Loan Product": ["read"],
}
def create_custom_role(data):
if data.get("role") and not frappe.db.exists("Role", data.get("role")):
frappe.get_doc(
{"doctype": "Role", "role_name": data.get("role"), "desk_access": 1, "is_custom": 1}
).insert(ignore_permissions=True)
def create_user_type(user_type, data):
if frappe.db.exists("User Type", user_type):
doc = frappe.get_cached_doc("User Type", user_type)
doc.user_doctypes = []
else:
doc = frappe.new_doc("User Type")
doc.update(
{
"name": user_type,
"role": data.get("role"),
"user_id_field": data.get("user_id_field"),
"apply_user_permission_on": data.get("apply_user_permission_on"),
}
)
docperms = data.get("doctypes")
if doc.role == "Employee Self Service" and "lending" in frappe.get_installed_apps():
docperms.update(get_lending_docperms_for_ess())
append_docperms_to_user_type(docperms, doc)
doc.flags.ignore_links = True
doc.save(ignore_permissions=True)
def append_docperms_to_user_type(docperms, doc):
existing_doctypes = [d.document_type for d in doc.user_doctypes]
for doctype, perms in docperms.items():
if doctype in existing_doctypes:
continue
args = {"document_type": doctype}
for perm in perms:
args[perm] = 1
doc.append("user_doctypes", args)
def update_select_perm_after_install():
if not frappe.flags.update_select_perm_after_migrate:
return
frappe.flags.ignore_select_perm = False
for row in frappe.get_all("User Type", filters={"is_standard": 0}):
print("Updating user type :- ", row.name)
doc = frappe.get_doc("User Type", row.name)
doc.flags.ignore_links = True
doc.save()
frappe.flags.update_select_perm_after_migrate = False
def delete_custom_fields(custom_fields: dict):
"""
:param custom_fields: a dict like `{'Salary Slip': [{fieldname: 'loans', ...}]}`
"""
for doctype, fields in custom_fields.items():
frappe.db.delete(
"Custom Field",
{
"fieldname": ("in", [field["fieldname"] for field in fields]),
"dt": doctype,
},
)
frappe.clear_cache(doctype=doctype)
DEFAULT_ROLE_PROFILES = {
"HR": [
"HR User",
"HR Manager",
"Leave Approver",
"Expense Approver",
],
}
def get_salary_slip_loan_fields():
return {
"Salary Slip": [
{
"fieldname": "loan_repayment_sb_1",
"fieldtype": "Section Break",
"label": _("Loan Repayment"),
"depends_on": "total_loan_repayment",
"insert_after": "base_total_deduction",
},
{
"fieldname": "loans",
"fieldtype": "Table",
"label": _("Employee Loan"),
"options": "Salary Slip Loan",
"print_hide": 1,
"insert_after": "loan_repayment_sb_1",
},
{
"fieldname": "loan_details_sb_1",
"fieldtype": "Section Break",
"depends_on": "eval:doc.docstatus != 0",
"insert_after": "loans",
},
{
"fieldname": "total_principal_amount",
"fieldtype": "Currency",
"label": _("Total Principal Amount"),
"default": "0",
"options": "Company:company:default_currency",
"read_only": 1,
"insert_after": "loan_details_sb_1",
},
{
"fieldname": "total_interest_amount",
"fieldtype": "Currency",
"label": _("Total Interest Amount"),
"default": "0",
"options": "Company:company:default_currency",
"read_only": 1,
"insert_after": "total_principal_amount",
},
{
"fieldname": "loan_cb_1",
"fieldtype": "Column Break",
"insert_after": "total_interest_amount",
},
{
"fieldname": "total_loan_repayment",
"fieldtype": "Currency",
"label": _("Total Loan Repayment"),
"default": "0",
"options": "Company:company:default_currency",
"read_only": 1,
"insert_after": "loan_cb_1",
},
],
"Loan": [
{
"default": "0",
"depends_on": 'eval:doc.applicant_type=="Employee"',
"fieldname": "repay_from_salary",
"fieldtype": "Check",
"label": _("Repay From Salary"),
"insert_after": "status",
},
],
"Loan Repayment": [
{
"default": "0",
"fieldname": "repay_from_salary",
"fieldtype": "Check",
"label": _("Repay From Salary"),
"insert_after": "is_term_loan",
},
{
"depends_on": "eval:doc.repay_from_salary",
"fieldname": "payroll_payable_account",
"fieldtype": "Link",
"label": _("Payroll Payable Account"),
"mandatory_depends_on": "eval:doc.repay_from_salary",
"options": "Account",
"insert_after": "payment_account",
},
{
"default": "0",
"depends_on": 'eval:doc.applicant_type=="Employee"',
"fieldname": "process_payroll_accounting_entry_based_on_employee",
"hidden": 1,
"fieldtype": "Check",
"label": _("Process Payroll Accounting Entry based on Employee"),
"insert_after": "repay_from_salary",
},
],
}
[file: hrms/setup.py | language: Python | license: agpl-3.0 | size: 24,571 bytes | repo: 2302_79757062/hrms]
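The ESS permissions in `get_user_types_data` are plain `{doctype: [perm, ...]}` mappings; `append_docperms_to_user_type` turns each entry into a `user_doctypes` child row with the named permissions set to 1. A standalone illustration of that transformation (no Frappe required):

```python
# Mirrors what append_docperms_to_user_type() does with the mapping
# defined in get_user_types_data() (standalone sketch).
docperms = {"Leave Application": ["read", "write", "create", "delete"]}

rows = []
for doctype, perms in docperms.items():
    row = {"document_type": doctype}
    for perm in perms:
        row[perm] = 1  # each listed permission becomes a flag on the row
    rows.append(row)

print(rows)
# [{'document_type': 'Leave Application', 'read': 1, 'write': 1,
#   'create': 1, 'delete': 1}]
```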
import requests
import frappe
STANDARD_ROLES = [
# standard roles
"Administrator",
"All",
"Guest",
# accounts
"Accounts Manager",
"Accounts User",
# projects
"Projects User",
"Projects Manager",
# framework
"Blogger",
"Dashboard Manager",
"Inbox User",
"Newsletter Manager",
"Prepared Report User",
"Report Manager",
"Script Manager",
"System Manager",
"Website Manager",
"Workspace Manager",
]
@frappe.whitelist(allow_guest=True)
def get_add_on_details(plan: str) -> dict[str, int]:
"""
Returns the number of employees to be billed under add-ons for SAAS subscription
site_details = {
"country": "India",
"plan": "Basic",
"credit_balance": 1000,
"add_ons": {
"employee": 2,
},
"expiry_date": "2021-01-01", # as per current usage
}
"""
EMPLOYEE_LIMITS = {"Basic": 25, "Essential": 50, "Professional": 100}
add_on_details = {}
employees_included_in_plan = EMPLOYEE_LIMITS.get(plan)
if employees_included_in_plan:
active_employees = get_active_employees()
add_on_employees = (
active_employees - employees_included_in_plan
if active_employees > employees_included_in_plan
else 0
)
else:
add_on_employees = 0
add_on_details["employees"] = add_on_employees
return add_on_details
def get_active_employees() -> int:
return frappe.db.count("Employee", {"status": "Active"})
@frappe.whitelist(allow_guest=True)
def subscription_updated(app: str, plan: str):
if app in ["hrms", "erpnext"] and plan:
update_erpnext_access()
def update_erpnext_access(user_input: dict | None = None):
	"""
	Called from hooks after setup wizard completion; ignored if the user has no HRMS subscription.
	Enables ERPNext workspaces and roles if the user has subscribed to both HRMS and ERPNext;
	disables them if the user has subscribed to HRMS alone.
	"""
	if not frappe.utils.get_url().endswith(".frappehr.com"):
		return
	update_erpnext_workspaces(True)
	update_erpnext_roles(True)
	set_app_logo()
def update_erpnext_workspaces(disable: bool = True):
erpnext_workspaces = [
"Home",
"Assets",
"Accounting",
"Buying",
"CRM",
"Manufacturing",
"Quality",
"Selling",
"Stock",
"Support",
]
for workspace in erpnext_workspaces:
try:
workspace_doc = frappe.get_doc("Workspace", workspace)
workspace_doc.flags.ignore_links = True
workspace_doc.flags.ignore_validate = True
workspace_doc.public = 0 if disable else 1
workspace_doc.save()
except Exception:
frappe.clear_messages()
def update_erpnext_roles(disable: bool = True):
roles = get_erpnext_roles()
for role in roles:
try:
role_doc = frappe.get_doc("Role", role)
role_doc.disabled = disable
role_doc.flags.ignore_links = True
role_doc.save()
except Exception:
pass
def set_app_logo():
frappe.db.set_single_value("Navbar Settings", "app_logo", "/assets/hrms/images/frappe-hr-logo.svg")
def get_erpnext_roles() -> set:
erpnext_roles = get_roles_for_app("erpnext")
hrms_roles = get_roles_for_app("hrms")
return erpnext_roles - hrms_roles - set(STANDARD_ROLES)
def get_roles_for_app(app_name: str) -> set:
erpnext_modules = get_modules_by_app(app_name)
doctypes = get_doctypes_by_modules(erpnext_modules)
roles = roles_by_doctype(doctypes)
return roles
def get_modules_by_app(app_name: str) -> list:
return frappe.db.get_all("Module Def", filters={"app_name": app_name}, pluck="name")
def get_doctypes_by_modules(modules: list) -> list:
return frappe.db.get_all("DocType", filters={"module": ("in", modules)}, pluck="name")
def roles_by_doctype(doctypes: list) -> set:
	roles = []
	for doctype in doctypes:
		# collect the role of every permission rule (avoids shadowing the loop variable)
		for perm in frappe.get_meta(doctype).permissions:
			roles.append(perm.role)
	return set(roles)
def hide_erpnext() -> bool:
hr_subscription = has_subscription(frappe.conf.sk_hrms)
erpnext_subscription = has_subscription(frappe.conf.sk_erpnext_smb or frappe.conf.sk_erpnext)
if not hr_subscription:
return False
if hr_subscription and erpnext_subscription:
# subscribed for ERPNext
return False
# no subscription for ERPNext
return True
def has_subscription(secret_key) -> bool:
url = f"https://frappecloud.com/api/method/press.api.developer.marketplace.get_subscription_status?secret_key={secret_key}"
response = requests.request(method="POST", url=url, timeout=5)
status = response.json().get("message")
return status == "Active"
[file: hrms/subscription_utils.py | language: Python | license: agpl-3.0 | size: 4,455 bytes | repo: 2302_79757062/hrms]
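`get_add_on_details` bills only the head-count above the plan's included limit. The core arithmetic, restated as a self-contained sketch:

```python
# Self-contained restatement of the add-on head-count math in
# get_add_on_details() (plan limits copied from the file above).
EMPLOYEE_LIMITS = {"Basic": 25, "Essential": 50, "Professional": 100}


def add_on_employees(plan: str, active_employees: int) -> int:
    included = EMPLOYEE_LIMITS.get(plan)
    if not included:
        return 0  # unknown plan: no add-on billing
    return max(active_employees - included, 0)


print(add_on_employees("Basic", 31))         # 6 employees billed as add-ons
print(add_on_employees("Professional", 80))  # 0: within the included limit
```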
<div class="gray-container text-center">
<div>
{% for person in anniversary_persons %}
{% if person.image %}
<img
class="avatar-frame standard-image"
src="{{ person.image }}"
style="{{ css_style or '' }}"
title="{{ person.name }}">
{% else %}
<span
class="avatar-frame standard-image"
style="{{ css_style or '' }}"
title="{{ person.name }}">
{{ frappe.utils.get_abbr(person.name) }}
</span>
{% endif %}
{% endfor %}
</div>
<div style="margin-top: 15px">
<span>{{ reminder_text }}</span>
<p class="text-muted">{{ message }}</p>
</div>
</div>
[file: hrms/templates/emails/anniversary_reminder.html | language: HTML | license: agpl-3.0 | size: 621 bytes | repo: 2302_79757062/hrms]
<div class="gray-container text-center">
<div>
{% for person in birthday_persons %}
{% if person.image %}
<img
class="avatar-frame standard-image"
src="{{ person.image }}"
style="{{ css_style or '' }}"
title="{{ person.name }}">
{% else %}
<span
class="avatar-frame standard-image"
style="{{ css_style or '' }}"
title="{{ person.name }}">
{{ frappe.utils.get_abbr(person.name) }}
</span>
{% endif %}
{% endfor %}
</div>
<div style="margin-top: 15px">
<span>{{ reminder_text }}</span>
<p class="text-muted">{{ message }}</p>
</div>
</div>
[file: hrms/templates/emails/birthday_reminder.html | language: HTML | license: agpl-3.0 | size: 612 bytes | repo: 2302_79757062/hrms]
<table border="0" cellpadding="0" cellspacing="0" width="100%">
<tr>
<h3>{{ title }}</h3>
</tr>
</table>
{% for reply in replies %}
<table class="panel-header" border="0" cellpadding="0" cellspacing="0" width="100%">
<tr height="10"></tr>
<tr>
<td width="15"></td>
<td valign="top" width="24">
{% if reply.image %}
<img class="sender-avatar" width="24" height="24" embed="{{ reply.image }}"/>
{% else %}
<div class="sender-avatar-placeholder">
{{ reply.sender_name[0] }}
</div>
{% endif %}
</td>
<td width="10"></td>
<td>
<div class="text-medium text-muted">
<span>{{ reply.sender_name }}</span>
</div>
</td>
<td width="15"></td>
</tr>
<tr height="10"></tr>
</table>
<table class="panel-body" border="0" cellpadding="0" cellspacing="0" width="100%">
<tr height="10"></tr>
<tr>
<td width="15"></td>
<td>
<div>
{{ reply.content }}
</div>
</td>
<td width="15"></td>
</tr>
<tr height="10"></tr>
</table>
<table border="0" cellpadding="0" cellspacing="0" width="100%">
<tr height="20"></tr>
</table>
{% endfor %}
{% if did_not_reply %}
<table border="0" cellpadding="0" cellspacing="0" width="100%">
<tr>
<div class="text-muted">
<p>{{ did_not_reply_title }}: {{ did_not_reply }}</p>
</div>
</tr>
</table>
{% endif %}
[file: hrms/templates/emails/daily_work_summary.html | language: HTML | license: agpl-3.0 | size: 1,299 bytes | repo: 2302_79757062/hrms]
<div>
<span>{{ reminder_text }}</span>
<p class="text-muted">{{ message }}</p>
</div>
{% if advance_holiday_reminder %}
{% if holidays | length > 0 %}
<ol>
{% for holiday in holidays %}
<li>{{ frappe.format(holiday.holiday_date, 'Date') }} - {{ holiday.description }}</li>
{% endfor %}
</ol>
{% else %}
<p>You have no upcoming holidays this {{ frequency }}.</p>
{% endif %}
{% endif %}
[file: hrms/templates/emails/holiday_reminder.html | language: HTML | license: agpl-3.0 | size: 444 bytes | repo: 2302_79757062/hrms]
<h3>{{_("Training Event")}}</h3>
<p>{{ message }}</p>
<h4>{{_("Details")}}</h4>
{{_("Event Name")}}: <a href="{{ event_link }}">{{ name }}</a>
<br>{{_("Event Location")}}: {{ location }}
<br>{{_("Start Time")}}: {{ start_time }}
<br>{{_("End Time")}}: {{ end_time }}
<br>{{_("Attendance")}}: {{ attendance }}
<h4>{{_("Update Response")}}</h4>
{% if not self_study %}
<p>{{_("Please update your status for this training event")}}:</p>
<form action="{{ confirm_link }}"><input style="display:inline-block" type="submit" value="Confirm Attendance" /></form>
<form action="{{ reject_link }}"><input style="display:inline-block" type="submit" value="Reject Invitation" /></form>
{% else %}
<p>{{_("Please confirm once you have completed your training")}}:</p>
<form action="{{ complete_link }}"><input style="display:inline-block" type="submit" value="Completed Training" /></form>
{% endif %}
<p>{{_("Thank you")}},<br>
{{ user_fullname }}</p>
[file: hrms/templates/emails/training_event.html | language: HTML | license: agpl-3.0 | size: 942 bytes | repo: 2302_79757062/hrms]
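Templates like the one above are rendered by Frappe's mailer with a context dict whose keys match the placeholders. A hedged sketch of such a call (the recipient, subject, and all values are made up; only the placeholder names come from the template):

```python
# Hypothetical send using the training_event.html placeholders above.
import frappe

frappe.sendmail(
    recipients=["employee@example.com"],
    subject="Training Event",
    template="training_event",  # resolved under templates/emails/
    args={
        "message": "You have been enrolled in a training event.",
        "name": "Advanced Excel",
        "event_link": "#",
        "location": "HQ",
        "start_time": "10:00",
        "end_time": "12:00",
        "attendance": "Mandatory",
        "self_study": 0,
        "confirm_link": "#",
        "reject_link": "#",
        "user_fullname": "HR Team",
    },
)
```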
{% extends "templates/web.html" %}
{% block page_content %}
<div class="py-12">
<!-- Header -->
<div class="row">
<div class="col-md-9 mb-8">
<h1 class="mt-0 mb-2 mb-md-3 mx-auto mx-md-0 text-center text-md-left">
{{ job_title }}
</h1>
<div class="flex align-items-center">
<div class="mx-auto mx-md-0">
<span class="font-weight-bold">{{ company }}</span>
{{ " · " }}
{{ posted_on }}
</div>
</div>
</div>
<div class="col-md-3 flex">
<div class="ml-auto d-none d-md-block">
{%- if status == "Open" -%}
<a
class="btn btn-primary btn-lg"
href="/{{ job_application_route if job_application_route else 'job_application' }}/new?job_title={{ name }}"
>
{{ _("Apply Now") }}
</a>
{%- else -%}
<div
class="py-4 px-10 font-weight-bold text-nowrap"
style="background: var(--bg-gray);
font-size: 1.1rem;
border-radius: var(--border-radius)"
>
{{ _("Opening closed.") }}
</div>
{% endif %}
</div>
</div>
</div>
<!-- Job Opening Details -->
<div class="mb-md-4 p-md-4">
<div class="row">
{%- if location -%}
<div class="col-12 col-md-4 mb-6 mb-md-8">
<div class="flex flex-row align-items-center">
<div class="rounded-circle p-4" style="background: var(--purple-50)">
<svg
class="icon"
style="height: 28px; width: 28px"
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 16 16"
>
<g stroke="var(--purple-700)" stroke-miterlimit="10">
<path d="M11.467 3.458c1.958 1.957 1.958 5.088.027 7.02L7.97 14l-3.523-3.523a4.945 4.945 0 010-6.993l.026-.026a4.922 4.922 0 016.993 0zm0 0c-.026-.026-.026-.026 0 0z">
</path>
<path d="M7.971 8.259a1.305 1.305 0 100-2.61 1.305 1.305 0 000 2.61z"></path>
</g>
</svg>
</div>
<div class="ml-5">
<div class="text-secondary text-uppercase" style="font-size: 11px">{{ _("Location") }}</div>
<div class="font-weight-bold">{{ location }}</div>
</div>
</div>
</div>
{% endif %}
{%- if department -%}
<div class="col-12 col-md-4 mb-6 mb-md-8">
<div class="flex flex-row align-items-center">
<div class="rounded-circle p-4" style="background: var(--blue-50)">
<svg
class="icon"
style="height: 28px;
width: 28px;
--icon-stroke: var(--blue-700)"
>
<use href="#icon-branch"></use>
</svg>
</div>
<div class="ml-5">
<div class="text-secondary text-uppercase" style="font-size: 11px">{{ _("Department") }}</div>
<div class="font-weight-bold">{{ department }}</div>
</div>
</div>
</div>
{% endif %}
{%- if publish_salary_range -%}
<div class="col-12 col-md-4 mb-6 mb-md-8">
<div class="flex flex-row align-items-center">
<div class="rounded-circle p-4" style="background: var(--green-50)">
<svg
class="icon"
style="height: 28px;
width: 28px;
--icon-stroke: var(--green-700)"
>
<use href="#icon-income"></use>
</svg>
</div>
<div class="ml-5">
<div class="text-secondary text-uppercase" style="font-size: 11px">{{ _("Salary Range") }}</div>
<div class="font-weight-bold">
{%- if lower_range -%}
{{ frappe.format_value(frappe.utils.flt(lower_range), currency=currency) }}
{% endif %}
{%- if lower_range and upper_range -%}
{{ " - " }}
{% endif %}
{%- if upper_range -%}
{{ frappe.format_value(frappe.utils.flt(upper_range), currency=currency) }}
{% endif %}
/
{{ salary_per.lower() }}
</div>
</div>
</div>
</div>
{% endif %}
{%- if employment_type -%}
<div class="col-12 col-md-4 mb-6 mb-md-8">
<div class="flex flex-row align-items-center">
<div class="rounded-circle p-4" style="background: var(--yellow-50)">
<svg
class="icon"
style="height: 28px;
width: 28px;
--icon-stroke: var(--yellow-700)"
>
<use href="#icon-hr"></use>
</svg>
</div>
<div class="ml-5">
<div class="text-secondary text-uppercase" style="font-size: 11px">{{ _("Employment Type") }}</div>
<div class="font-weight-bold">{{ employment_type }}</div>
</div>
</div>
</div>
{% endif %}
{%- if publish_applications_received -%}
<div class="col-12 col-md-4 mb-6 mb-md-8">
<div class="flex flex-row align-items-center">
<div class="rounded-circle p-4" style="background: var(--orange-50)">
<svg
class="icon"
style="height: 28px;
width: 28px;
--icon-stroke: var(--orange-700)"
>
<use href="#icon-users"></use>
</svg>
</div>
<div class="ml-5">
<div class="text-secondary text-uppercase" style="font-size: 11px">{{ _("Applications Received") }}</div>
<div class="font-weight-bold">{{ no_of_applications }}</div>
</div>
</div>
</div>
{%- endif -%}
{%- if (status == 'Open' and closes_on) or (status == 'Closed' and closed_on) -%}
<div class="col-12 col-md-4 mb-6 mb-md-8">
<div class="flex flex-row align-items-center">
<div class="rounded-circle p-4" style="background: var(--red-50)">
<svg
class="icon"
style="height: 28px;
width: 28px;
--icon-stroke: var(--red-700)"
>
<use href="#icon-calendar"></use>
</svg>
</div>
<div class="ml-5">
<div
class="text-secondary text-uppercase"
style="font-size: 11px"
>
{{ _("Closes On") if status == "Open" else _("Closed On") }}
</div>
<div class="font-weight-bold">
{{ frappe.format_date(closes_on if status == "Open" else closed_on, "d MMM, YYYY") }}
</div>
</div>
</div>
</div>
{% endif %}
</div>
</div>
{%- if description -%}<p>{{ description }}</p>{% endif %}
<!-- Mobile Apply Now Button -->
<div id="sticky-div" class="position-sticky d-md-none" style="bottom: 0; ">
<div class="w-100 bg-white py-8">
{%- if status == "Open" -%}
<a
class="btn btn-primary btn-lg w-100"
href="/{{ job_application_route if job_application_route else 'job_application' }}/new?job_title={{ name }}"
>
{{ _("Apply Now") }}
</a>
{%- else -%}
<div
class="py-4 px-10 text-center font-weight-bold text-nowrap"
style="background: var(--bg-gray);
font-size: 1.1rem;
border-radius: var(--border-radius)"
>
{{ _("Opening closed.") }}
</div>
{% endif %}
</div>
</div>
</div>
{% endblock page_content %}
[file: hrms/templates/generators/job_opening.html | language: HTML | license: agpl-3.0 | size: 6,827 bytes | repo: 2302_79757062/hrms]
<table class='table table-bordered'>
<caption>{{title}}</caption>
<thead>
<tr>
{% for key in keys %}
<th {% if key == "Total Pay" %}style="text-align: right;"{% endif %}>{{ key }}</th>
{% endfor %}
</tr>
</thead>
<tbody>
{% for ss_dict in ss_list %}
<tr>
{% for key, value in ss_dict.items()|sort %}
<td {% if key == "Total Pay" %}align="right"{% endif %}>{{ value }}</td>
{% endfor %}
</tr>
{% endfor %}
</tbody>
</table>
[file: hrms/templates/includes/salary_slip_log.html | language: HTML | license: agpl-3.0 | size: 465 bytes | repo: 2302_79757062/hrms]
import click
from hrms.setup import before_uninstall as remove_custom_fields
def before_uninstall():
try:
print("Removing customizations created by the Frappe HR app...")
remove_custom_fields()
except Exception as e:
BUG_REPORT_URL = "https://github.com/frappe/hrms/issues/new"
click.secho(
"Removing Customizations for Frappe HR failed due to an error."
" Please try again or"
f" report the issue on {BUG_REPORT_URL} if not resolved.",
fg="bright_red",
)
raise e
click.secho("Frappe HR app customizations have been removed successfully...", fg="green")
|
2302_79757062/hrms
|
hrms/uninstall.py
|
Python
|
agpl-3.0
| 586
|
from collections.abc import Generator
import requests
import frappe
from frappe.utils import add_days, date_diff
country_info = {}
@frappe.whitelist(allow_guest=True)
def get_country(fields=None):
global country_info
ip = frappe.local.request_ip
if ip not in country_info:
fields = ["countryCode", "country", "regionName", "city"]
res = requests.get(
"https://pro.ip-api.com/json/{ip}?key={key}&fields={fields}".format(
ip=ip, key=frappe.conf.get("ip-api-key"), fields=",".join(fields)
)
)
try:
country_info[ip] = res.json()
except Exception:
country_info[ip] = {}
return country_info[ip]
def get_date_range(start_date: str, end_date: str) -> list[str]:
"""returns list of dates between start and end dates"""
no_of_days = date_diff(end_date, start_date) + 1
return [add_days(start_date, i) for i in range(no_of_days)]
def generate_date_range(start_date: str, end_date: str, reverse: bool = False) -> Generator[str, None, None]:
no_of_days = date_diff(end_date, start_date) + 1
date_field = end_date if reverse else start_date
direction = -1 if reverse else 1
for n in range(no_of_days):
yield add_days(date_field, direction * n)
def get_employee_email(employee_id: str) -> str | None:
employee_emails = frappe.db.get_value(
"Employee",
employee_id,
["prefered_email", "user_id", "company_email", "personal_email"],
as_dict=True,
)
return (
employee_emails.prefered_email
or employee_emails.user_id
or employee_emails.company_email
or employee_emails.personal_email
)
|
2302_79757062/hrms
|
hrms/utils/__init__.py
|
Python
|
agpl-3.0
| 1,545
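A quick usage sketch for the date helpers above; the dates are illustrative, and this assumes an app context where hrms is installed:

from hrms.utils import get_date_range, generate_date_range

# Three dates, 2024-01-01 through 2024-01-03, inclusive on both ends.
dates = get_date_range("2024-01-01", "2024-01-03")

# The same range produced lazily, newest first.
for d in generate_date_range("2024-01-01", "2024-01-03", reverse=True):
	print(d)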
|
# Copyright (c) 2021, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
import frappe
from frappe import _
@frappe.whitelist()
def get_all_nodes(method, company):
"""Recursively gets all data from nodes"""
method = frappe.get_attr(method)
if method not in frappe.whitelisted:
frappe.throw(_("Not Permitted"), frappe.PermissionError)
root_nodes = method(company=company)
result = []
nodes_to_expand = []
for root in root_nodes:
data = method(root.id, company)
result.append(dict(parent=root.id, parent_name=root.name, data=data))
nodes_to_expand.extend(
[{"id": d.get("id"), "name": d.get("name")} for d in data if d.get("expandable")]
)
while nodes_to_expand:
parent = nodes_to_expand.pop(0)
data = method(parent.get("id"), company)
result.append(dict(parent=parent.get("id"), parent_name=parent.get("name"), data=data))
for d in data:
if d.get("expandable"):
nodes_to_expand.append({"id": d.get("id"), "name": d.get("name")})
return result
|
2302_79757062/hrms
|
hrms/utils/hierarchy_chart.py
|
Python
|
agpl-3.0
| 1,015
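The expansion in get_all_nodes above is a plain breadth-first traversal; a self-contained sketch of the same idea over an in-memory tree, with hypothetical node names standing in for the whitelisted node method:

from collections import deque

# Hypothetical adjacency map playing the role of the node-fetching method.
children = {"CEO": ["CTO", "CFO"], "CTO": ["Dev"], "CFO": [], "Dev": []}

def expand_all(root: str) -> list[dict]:
	result, queue = [], deque([root])
	while queue:
		parent = queue.popleft()  # FIFO order keeps the traversal level by level
		data = children[parent]
		result.append({"parent": parent, "data": data})
		queue.extend(data)  # enqueue every child for later expansion
	return result

print(expand_all("CEO"))  # CEO first, then CTO/CFO, then Dev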
|
import frappe
def get_holiday_dates_between(
holiday_list: str,
start_date: str,
end_date: str,
skip_weekly_offs: bool = False,
) -> list:
Holiday = frappe.qb.DocType("Holiday")
query = (
frappe.qb.from_(Holiday)
.select(Holiday.holiday_date)
.where((Holiday.parent == holiday_list) & (Holiday.holiday_date.between(start_date, end_date)))
.orderby(Holiday.holiday_date)
)
if skip_weekly_offs:
query = query.where(Holiday.weekly_off == 0)
return query.run(pluck=True)
def invalidate_cache(doc, method=None):
from hrms.payroll.doctype.salary_slip.salary_slip import HOLIDAYS_BETWEEN_DATES
frappe.cache().delete_value(HOLIDAYS_BETWEEN_DATES)
|
2302_79757062/hrms
|
hrms/utils/holiday_list.py
|
Python
|
agpl-3.0
| 667
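Typical usage of the helper above, sketched against a placeholder Holiday List name (run within a site context):

from hrms.utils.holiday_list import get_holiday_dates_between

# "Company Holidays 2024" is a hypothetical Holiday List name.
dates = get_holiday_dates_between(
	"Company Holidays 2024",
	"2024-01-01",
	"2024-12-31",
	skip_weekly_offs=True,  # drop weekly offs, keep only declared holidays
)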
|
import frappe
no_cache = 1
def get_context(context):
csrf_token = frappe.sessions.get_csrf_token()
	frappe.db.commit()  # nosemgrep
context = frappe._dict()
context.csrf_token = csrf_token
context.boot = get_boot()
return context
@frappe.whitelist(methods=["POST"], allow_guest=True)
def get_context_for_dev():
if not frappe.conf.developer_mode:
frappe.throw(frappe._("This method is only meant for developer mode"))
return get_boot()
def get_boot():
return frappe._dict(
{"site_name": frappe.local.site, "push_relay_server_url": frappe.conf.get("push_relay_server_url")}
)
|
2302_79757062/hrms
|
hrms/www/hrms.py
|
Python
|
agpl-3.0
| 595
|
body.jobs-page {
background: var(--gray-50);
}
h3.jobs-page {
font-size: 1.7rem;
}
h4.jobs-page {
font-size: 1.35rem;
}
.text-18 {
font-size: 18px;
}
.text-17 {
font-size: 17px;
}
.text-15 {
font-size: 15px;
}
.text-14 {
font-size: 14px;
}
.text-13 {
font-size: 13px;
}
.text-12 {
font-size: 12px;
}
.full-time-badge {
background: var(--bg-green);
color: var(--text-on-green);
border-radius: var(--border-radius);
}
.part-time-badge {
background: var(--bg-orange);
color: var(--text-on-orange);
border-radius: var(--border-radius);
}
.other-badge {
background: var(--bg-blue);
color: var(--text-on-blue);
border-radius: var(--border-radius);
}
.order-item:active {
background-color: var(--gray-200);
}
.job-card-footer {
background: var(--gray-100);
border-radius: 0 0 0.75rem 0.75rem;
}
.search-box-container {
width: 100%;
}
#search-box {
padding-left: 36px;
background-color: var(--bg-color);
}
.search-bar .search-icon {
position: absolute;
margin-left: 12px;
display: flex;
align-items: center;
height: 100%;
}
.filters-section .title-section {
border-bottom: 1px solid var(--gray-300);
}
.filters-drawer {
height: 80vh;
bottom: -80vh;
display: flex;
flex-direction: column;
left: 0;
transition: bottom 0.3s ease;
box-shadow: 0px -5px 15px rgba(0, 0, 0, 0.1);
border-radius: 16px 16px 0px 0px;
z-index: 5 !important;
}
.overlay {
position: fixed;
top: 0;
left: 0;
width: 100%;
height: 100%;
background-color: rgba(0, 0, 0, 0.4);
display: none;
z-index: 3 !important;
}
|
2302_79757062/hrms
|
hrms/www/jobs/index.css
|
CSS
|
agpl-3.0
| 1,540
|
{% extends "templates/web.html" %}
{% block title %} {{ _("Job Openings") }} {% endblock title %}
{% block header %}
<h3 class="mt-0 mb-10 jobs-page">{{ _("Job Openings") }}</h3>
{% endblock header %}
{% block page_content %}
<meta
id="data"
data-filters="{{ all_filters }}"
data-no-of-pages="{{ no_of_pages }}"
/>
<div class="row">
<!-- Desktop Filters -->
<div class="col-3 text-15 d-none d-lg-block">
<div class="flex align-items-center">
<p class="text-18 font-weight-bold mb-0">{{ _("Filters") }}</p>
<a name="clear-filters" class="ml-auto" role="button">{{ _("Clear All") }}</a>
</div>
<hr class="mb-6" />
{% for name, values in all_filters.items() %}
<div class="mb-6">
<p class="font-weight-bold mb-4">
{{ name.title() | replace('_', ' ') }}
</p>
{% for value in values %}
<div class="form-group form-check">
<input
id="{{ 'desktop-' ~ value }}"
name="{{ name }}"
value="{{ value }}"
class="form-check-input desktop-filters"
type="checkbox"
role="button"
/>
<label
class="form-check-label align-top"
for="{{ 'desktop-' ~ value }}"
role="button"
>
{{ value }}
</label>
</div>
{% endfor %}
</div>
{% endfor %}
</div>
<div class="col-lg-9">
<div class="row px-4 mb-10 align-items-center">
<!-- Search -->
<div class="col-9 col-lg-8 px-0">
<div class="input-group search-bar flex text-muted">
<div class="search-box-container">
<input
type="search"
name="query"
id="search-box"
class="form-control border font-md h-100 desktop-filters mobile-filters"
placeholder="{{ _('Search for Jobs') }}"
aria-label="Jobs Search"
/>
</div>
<span class="search-icon">
<svg class="icon icon-sm"><use href="#icon-search"></use></svg>
</span>
</div>
</div>
<div class="col-3 col-lg-4 flex pr-0">
<div class="ml-auto flex align-items-center">
<div class="btn-group border h-100" style="border-radius: 8px">
<!-- Sort -->
{% set sort = frappe.form_dict.sort %}
<button id="sort" class="btn btn-default btn-order bg-white">
<span class="sort-order">
<svg class="icon icon-sm">
							<use href="#icon-sort-{{ 'ascending' if sort == 'asc' else 'descending' }}"></use>
</svg>
</span>
</button>
<div
class="border-left px-3 flex align-items-center bg-white d-none d-lg-block"
style="border-radius: 0px 8px 8px 0px"
>
<span class="text-14">{{ _("Posting Date") }}</span>
</div>
<!-- Mobile Filter Button -->
<button
id="filter"
class="btn btn-default btn-order bg-white border-left d-lg-none"
>
<span class="sort-order">
<svg class="icon icon-sm">
							<use href="#icon-filter"></use>
</svg>
</span>
</button>
</div>
</div>
</div>
</div>
<!-- Job Opening Cards -->
<p class="text-secondary mb-4 text-15">
{% set job_opening_count = job_openings|length %}
{{ _("Showing") + " " + frappe.utils.cstr(job_opening_count) + " " }}
{{ _("result") if job_opening_count == 1 else _("results")}}
</p>
<div class="row">
{% for jo in job_openings %}
<div class="mb-8 col-sm-6">
<div
id="{{ jo.route }}"
name="card"
class="card border h-100 flex flex-col"
role="button"
>
<div class="p-6">
<div class="flex mb-5">
<div class="col-12 {{ 'col-lg-9' if jo.employment_type }} px-0">
<h4
class="mt-0 mb-1 jobs-page text-truncate"
title="{{ jo.job_title }}"
>
{{ jo.job_title }}
</h4>
<div class="text-14">
<span class="font-weight-bold">{{ jo.company }}</span>
<span class="text-secondary">
{{ " · " }} {{ jo.posted_on }}
</span>
</div>
</div>
{%- if jo.employment_type -%}
<div class="col-3 px-0 flex d-none d-lg-flex">
<div class="ml-auto font-weight-bold text-nowrap text-12">
{%- if jo.employment_type == "Full-time" -%}
<div class="py-1 px-2 full-time-badge">
• {{ jo.employment_type }}
</div>
{%- elif jo.employment_type == "Part-time" -%}
<div class="py-1 px-2 part-time-badge">
• {{ jo.employment_type }}
</div>
{%- else -%}
<div class="py-1 px-2 other-badge">
• {{ jo.employment_type }}
</div>
{% endif %}
</div>
</div>
{% endif %}
</div>
<!-- Job Details -->
<div class="text-14">
{%- if jo.location -%}
<div class="mt-3 flex align-items-center">
<svg
class="icon ml-0 mr-1"
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 16 16"
>
<g stroke="var(--gray-700)" stroke-miterlimit="10">
<path
d="M11.467 3.458c1.958 1.957 1.958 5.088.027 7.02L7.97 14l-3.523-3.523a4.945 4.945 0 010-6.993l.026-.026a4.922 4.922 0 016.993 0zm0 0c-.026-.026-.026-.026 0 0z"
></path>
<path
d="M7.971 8.259a1.305 1.305 0 100-2.61 1.305 1.305 0 000 2.61z"
></path>
</g>
</svg>
{{ jo.location }}
</div>
{% endif %}
{%- if jo.department -%}
<div class="mt-3 flex align-items-center">
<svg
class="icon ml-0 mr-1"
style="--icon-stroke: var(--gray-700)"
>
<use href="#icon-branch"></use>
</svg>
{{ jo.department }}
</div>
{% endif %}
{%- if jo.publish_salary_range -%}
<div class="mt-3 flex align-items-center">
<svg
class="icon ml-0 mr-1"
style="--icon-stroke: var(--gray-700)"
>
<use href="#icon-income"></use>
</svg>
{%- if jo.lower_range -%}
{{ frappe.format_value(frappe.utils.flt(jo.lower_range), currency=jo.currency) }}
{% endif %}
{%- if jo.lower_range and jo.upper_range -%}
{{ " - " }}
{% endif %}
{%- if jo.upper_range -%}
{{ frappe.format_value(frappe.utils.flt(jo.upper_range), currency=jo.currency) }}
{% endif %}
/
{{ jo.salary_per.lower() }}
</div>
{% endif %}
</div>
</div>
<div class="px-4 py-2 job-card-footer mt-auto">
<div class="row text-12 text-secondary">
<p class="col-6 text-center mb-0 {{ 'border-right' if (jo.publish_applications_received or jo.closes_on) }}">
{%- if jo.publish_applications_received -%}
{{ _("Applications received:") + " " }}
<b>{{ jo.no_of_applications }}</b>
										{% endif %}
</p>
<p class="col-6 text-center mb-0">
{%- if jo.closes_on -%}
{{ _("Closes on:") + " " }}
<b>{{ frappe.format_date(jo.closes_on, "d MMM, YYYY") }}</b>
{% endif %}
</p>
</div>
</div>
</div>
</div>
{% endfor %}
</div>
<!-- Pagination -->
{%- if no_of_pages > 1 -%}
<div class="mb-4 flex">
<div
class="btn-group mx-auto border rounded"
role="group"
aria-label="Pagination"
>
{% set page = frappe.form_dict.page %}
{% set page = '1' if (not page or page|int > no_of_pages or page|int < 1) else page %}
<button
id="previous"
class="btn btn-default border-right flex align-items-center bg-white"
>
<svg class="icon icon-sm" style="--icon-stroke: var(--gray-600)">
<use href="#icon-left"></use>
</svg>
</button>
<div class="flex bg-white">
{% set initial_page = 1 if page|int == 1 else ((page|int / 3 + 0.5) | round(method='floor')|int * 3 - 2) %}
{% set no_of_displayed_pages = 5 if no_of_pages - initial_page > 5 else no_of_pages - initial_page + 1 %}
{% for i in range(no_of_displayed_pages) %}
{% set pg = i + initial_page %}
<button
id="{{ pg }}"
name="pagination"
class="btn btn-default text-muted rounded-0"
style="background-color: {{ 'var(--gray-100)' if pg|string == page else 'white' }}"
>
{% if (loop.first and pg != 1) or (loop.last and pg != no_of_pages) %}
<span>...</span>
{% else %}
<span>{{ pg }}</span>
{% endif %}
</button>
{% endfor %}
</div>
<button
id="next"
class="btn btn-default border-left flex align-items-center bg-white"
>
<svg class="icon icon-sm" style="--icon-stroke: var(--gray-600)">
<use href="#icon-right"></use>
</svg>
</button>
</div>
</div>
{% endif %}
</div>
<!-- Mobile Filters -->
<div
id="filters-drawer"
class="filters-drawer position-fixed bg-white w-100 d-lg-none"
>
<div class="flex align-items-center py-4 px-6 border-bottom">
<p class="text-18 font-weight-bold mb-0">{{ _("Filters") }}</p>
<div name="close-filters-drawer" class="ml-auto">
<svg class="icon icon-lg">
<use href="#icon-close"></use>
</svg>
</div>
</div>
<div class="px-6 pt-6 flex-grow-1 overflow-auto">
{% for name, values in all_filters.items() %}
<div class="mb-6">
<p class="font-weight-bold mb-4">
{{ name.title() | replace('_', ' ') }}
</p>
{% for value in values %}
<div class="form-group form-check">
<input
id="{{ 'mobile-' ~ value }}"
name="{{ name }}"
value="{{ value }}"
class="form-check-input mobile-filters"
type="checkbox"
role="button"
/>
<label
class="form-check-label align-top"
for="{{ 'mobile-' ~ value }}"
role="button"
>
{{ value }}
</label>
</div>
{% endfor %}
</div>
{% endfor %}
</div>
<div class="flex align-items-center py-4 border-top">
<a name="clear-filters" class="text-17 text-center w-50 mx-6" role="button">{{ _("Clear All") }}</a>
<a id="apply-filters" class="btn btn-primary btn-lg w-50 mx-6">{{ _("Apply") }}</a>
</div>
</div>
<div id="overlay" name="close-filters-drawer" class="overlay d-lg-none"></div>
</div>
{% endblock page_content %}
|
2302_79757062/hrms
|
hrms/www/jobs/index.html
|
HTML
|
agpl-3.0
| 10,251
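The initial_page expression in the pagination block above slides the five-button window forward in steps of three; the same arithmetic in Python, as a sanity check (the page numbers are illustrative):

import math

def initial_page(page: int) -> int:
	# Mirrors the Jinja expression: floor(page / 3 + 0.5) * 3 - 2, with page 1 pinned.
	return 1 if page == 1 else math.floor(page / 3 + 0.5) * 3 - 2

assert [initial_page(p) for p in range(1, 9)] == [1, 1, 1, 1, 4, 4, 4, 7]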
|
$(() => {
const query_params = frappe.utils.get_query_params();
update_ui_with_filters();
$(".desktop-filters").change(function () {
update_params(get_new_params(".desktop-filters"));
});
$("#apply-filters").on("click", function () {
update_params(get_new_params(".mobile-filters"));
});
$("[name=clear-filters]").on("click", function () {
update_params();
});
$("#filter").click(function () {
scroll_up_and_execute(() => {
$("#filters-drawer").css("bottom", 0);
$("#overlay").show();
$("html, body").css({
overflow: "hidden",
height: "100%",
});
});
});
$("[name=close-filters-drawer").click(function () {
$("#filters-drawer").css("bottom", "-80vh");
$("#overlay").hide();
$("html, body").css({
overflow: "auto",
height: "auto",
});
});
$("#search-box").bind("search", function () {
update_params(get_new_params(".desktop-filters"));
});
$("#search-box").keyup(function (e) {
		if (e.key === "Enter") {
$(this).trigger("search");
}
});
$("#sort").on("click", function () {
const filters = $(".desktop-filters").serialize();
query_params.sort === "asc"
? update_params(filters)
: update_params(filters + "&sort=asc");
});
$("[name=card]").on("click", function () {
window.location.href = this.id;
});
$("[name=pagination]").on("click", function () {
const filters = $(".desktop-filters").serialize();
update_params(filters + "&page=" + this.id);
});
$("#previous").on("click", function () {
const new_page = (Number(query_params?.page) || 1) - 1;
const filters = $(".desktop-filters").serialize();
update_params(filters + "&page=" + new_page);
});
$("#next").on("click", function () {
const new_page = (Number(query_params?.page) || 1) + 1;
const filters = $(".desktop-filters").serialize();
update_params(filters + "&page=" + new_page);
});
function update_ui_with_filters() {
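		// data-filters holds a Python dict repr; swapping single quotes for double quotes below makes it valid JSON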
const allowed_filters = Object.keys(
JSON.parse($("#data").data("filters").replace(/'/g, '"')),
);
for (const filter in query_params) {
if (filter === "query") $("#search-box").val(query_params["query"]);
else if (filter === "page") disable_inapplicable_pagination_buttons();
else if (allowed_filters.includes(filter)) {
if (typeof query_params[filter] === "string") {
$("#desktop-" + $.escapeSelector(query_params[filter])).prop("checked", true);
$("#mobile-" + $.escapeSelector(query_params[filter])).prop("checked", true);
} else
for (const d of query_params[filter]) {
$("#desktop-" + $.escapeSelector(d)).prop("checked", true);
$("#mobile-" + $.escapeSelector(d)).prop("checked", true);
}
			}
}
}
function disable_inapplicable_pagination_buttons() {
const no_of_pages = JSON.parse($("#data").data("no-of-pages"));
const page_no = Number(query_params["page"]);
if (page_no === no_of_pages) {
$("#next").prop("disabled", true);
} else if (page_no > no_of_pages || page_no <= 1) {
$("#previous").prop("disabled", true);
}
}
function get_new_params(filter_group) {
return "sort" in query_params
? $(filter_group).serialize() + "&" + $.param({ sort: query_params["sort"] })
: $(filter_group).serialize();
}
});
function update_params(params = "") {
if ($("#filters-drawer").css("bottom") != "0px")
return scroll_up_and_execute(() => (window.location.href = "/jobs?" + params));
$("#filters-drawer").css("bottom", "-80vh");
$("#filters-drawer").on("transitionend webkitTransitionEnd oTransitionEnd", () =>
scroll_up_and_execute(() => (window.location.href = "/jobs?" + params)),
);
}
function scroll_up_and_execute(callback) {
if (window.scrollY === 0) return callback();
function execute_after_scrolling_up() {
if (window.scrollY === 0) {
callback();
window.removeEventListener("scroll", execute_after_scrolling_up);
}
}
window.scroll({
top: 0,
behavior: "smooth",
});
window.addEventListener("scroll", execute_after_scrolling_up);
}
|
2302_79757062/hrms
|
hrms/www/jobs/index.js
|
JavaScript
|
agpl-3.0
| 3,970
|
import math
import frappe
from frappe import _
from frappe.query_builder import Order
from frappe.query_builder.functions import Count
from frappe.utils import pretty_date
def get_context(context):
context.no_cache = 1
context.parents = [{"name": _("My Account"), "route": "/"}]
context.body_class = "jobs-page"
page_len = 20
filters, txt, sort, offset = get_filters_txt_sort_offset(page_len)
context.job_openings = get_job_openings(filters, txt, sort, page_len, offset)
context.no_of_pages = get_no_of_pages(filters, txt, page_len)
context.all_filters = get_all_filters(filters)
context.sort = sort
def get_job_openings(filters=None, txt=None, sort=None, limit=20, offset=0):
jo = frappe.qb.DocType("Job Opening")
ja = frappe.qb.DocType("Job Applicant")
query = (
frappe.qb.from_(jo)
.left_join(ja)
.on(ja.job_title == jo.name)
.select(
jo.name,
jo.status,
jo.job_title,
jo.description,
jo.publish_applications_received,
jo.publish_salary_range,
jo.lower_range,
jo.upper_range,
jo.currency,
jo.job_application_route,
jo.salary_per,
jo.route,
jo.location,
jo.department,
jo.employment_type,
jo.company,
jo.posted_on,
jo.closes_on,
Count(ja.job_title).as_("no_of_applications"),
)
.where((jo.status == "Open") & (jo.publish == 1))
.groupby(jo.name)
.limit(limit)
.offset(offset)
)
for d in filters:
query = query.where(frappe.qb.Field(d).isin(filters[d]))
if txt:
query = query.where((jo.job_title.like(f"%{txt}%")) | (jo.description.like(f"%{txt}%")))
query = query.orderby("posted_on", order=Order.asc if sort == "asc" else Order.desc)
results = query.run(as_dict=True)
for d in results:
d.posted_on = pretty_date(d.posted_on)
return results
def get_no_of_pages(filters=None, txt=None, page_length=20):
jo = frappe.qb.DocType("Job Opening")
query = (
frappe.qb.from_(jo)
.select(
Count("*").as_("no_of_openings"),
)
.where((jo.status == "Open") & (jo.publish == 1))
)
for d in filters:
query = query.where(frappe.qb.Field(d).isin(filters[d]))
if txt:
query = query.where((jo.job_title.like(f"%{txt}%")) | (jo.description.like(f"%{txt}%")))
result = query.run(as_dict=True)
return math.ceil(result[0].no_of_openings / page_length)
def get_all_filters(filters=None):
job_openings = frappe.get_all(
"Job Opening",
filters={"publish": 1, "status": "Open"},
fields=["company", "department", "employment_type", "location"],
)
companies = filters.get("company", [])
all_filters = {}
for opening in job_openings:
for key, value in opening.items():
if value and (key == "company" or not companies or opening.company in companies):
all_filters.setdefault(key, set()).add(value)
return {key: sorted(value) for key, value in all_filters.items()}
def get_filters_txt_sort_offset(page_len=20):
args = frappe.request.args.to_dict(flat=False)
filters = {}
txt = ""
sort = None
offset = 0
allowed_filters = ["company", "department", "employment_type", "location"]
for d in args:
if d in allowed_filters:
filters[d] = args[d]
elif d == "query":
txt = args["query"][0]
elif d == "sort":
if args["sort"][0]:
sort = args["sort"][0]
elif d == "page":
offset = (int(args["page"][0]) - 1) * page_len
return filters, txt, sort, offset
|
2302_79757062/hrms
|
hrms/www/jobs/index.py
|
Python
|
agpl-3.0
| 3,310
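The paging math above in miniature, with illustrative numbers:

import math

page_len = 20
no_of_openings = 41
no_of_pages = math.ceil(no_of_openings / page_len)  # 3 pages
offset = (3 - 1) * page_len  # page 3 starts at row 40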
|
import frappe
def get_context(context):
csrf_token = frappe.sessions.get_csrf_token()
	frappe.db.commit()  # nosemgrep
context = frappe._dict()
context.csrf_token = csrf_token
return context
|
2302_79757062/hrms
|
hrms/www/roster.py
|
Python
|
agpl-3.0
| 198
|
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<link rel="icon" href="/favicon.png" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Roster</title>
</head>
<body class="text-gray-800">
<div id="app"></div>
<div id="modals"></div>
<div id="popovers"></div>
<script>
window.csrf_token = "{{ csrf_token }}";
</script>
<script type="module" src="/src/main.ts"></script>
</body>
</html>
|
2302_79757062/hrms
|
roster/index.html
|
HTML
|
agpl-3.0
| 462
|
module.exports = {
plugins: {
tailwindcss: {},
autoprefixer: {},
},
};
|
2302_79757062/hrms
|
roster/postcss.config.js
|
JavaScript
|
agpl-3.0
| 77
|
<template>
<div>
<router-view />
</div>
</template>
|
2302_79757062/hrms
|
roster/src/App.vue
|
Vue
|
agpl-3.0
| 56
|
@font-face {
font-family: "Inter";
font-style: normal;
font-weight: 100;
font-display: swap;
src:
url("Inter-Thin.woff2?v=3.12") format("woff2"),
url("Inter-Thin.woff?v=3.12") format("woff");
}
@font-face {
font-family: "Inter";
font-style: italic;
font-weight: 100;
font-display: swap;
src:
url("Inter-ThinItalic.woff2?v=3.12") format("woff2"),
url("Inter-ThinItalic.woff?v=3.12") format("woff");
}
@font-face {
font-family: "Inter";
font-style: normal;
font-weight: 200;
font-display: swap;
src:
url("Inter-ExtraLight.woff2?v=3.12") format("woff2"),
url("Inter-ExtraLight.woff?v=3.12") format("woff");
}
@font-face {
font-family: "Inter";
font-style: italic;
font-weight: 200;
font-display: swap;
src:
url("Inter-ExtraLightItalic.woff2?v=3.12") format("woff2"),
url("Inter-ExtraLightItalic.woff?v=3.12") format("woff");
}
@font-face {
font-family: "Inter";
font-style: normal;
font-weight: 300;
font-display: swap;
src:
url("Inter-Light.woff2?v=3.12") format("woff2"),
url("Inter-Light.woff?v=3.12") format("woff");
}
@font-face {
font-family: "Inter";
font-style: italic;
font-weight: 300;
font-display: swap;
src:
url("Inter-LightItalic.woff2?v=3.12") format("woff2"),
url("Inter-LightItalic.woff?v=3.12") format("woff");
}
@font-face {
font-family: "Inter";
font-style: normal;
font-weight: 400;
font-display: swap;
src:
url("Inter-Regular.woff2?v=3.12") format("woff2"),
url("Inter-Regular.woff?v=3.12") format("woff");
}
@font-face {
font-family: "Inter";
font-style: italic;
font-weight: 400;
font-display: swap;
src:
url("Inter-Italic.woff2?v=3.12") format("woff2"),
url("Inter-Italic.woff?v=3.12") format("woff");
}
@font-face {
font-family: "Inter";
font-style: normal;
font-weight: 500;
font-display: swap;
src:
url("Inter-Medium.woff2?v=3.12") format("woff2"),
url("Inter-Medium.woff?v=3.12") format("woff");
}
@font-face {
font-family: "Inter";
font-style: italic;
font-weight: 500;
font-display: swap;
src:
url("Inter-MediumItalic.woff2?v=3.12") format("woff2"),
url("Inter-MediumItalic.woff?v=3.12") format("woff");
}
@font-face {
font-family: "Inter";
font-style: normal;
font-weight: 600;
font-display: swap;
src:
url("Inter-SemiBold.woff2?v=3.12") format("woff2"),
url("Inter-SemiBold.woff?v=3.12") format("woff");
}
@font-face {
font-family: "Inter";
font-style: italic;
font-weight: 600;
font-display: swap;
src:
url("Inter-SemiBoldItalic.woff2?v=3.12") format("woff2"),
url("Inter-SemiBoldItalic.woff?v=3.12") format("woff");
}
@font-face {
font-family: "Inter";
font-style: normal;
font-weight: 700;
font-display: swap;
src:
url("Inter-Bold.woff2?v=3.12") format("woff2"),
url("Inter-Bold.woff?v=3.12") format("woff");
}
@font-face {
font-family: "Inter";
font-style: italic;
font-weight: 700;
font-display: swap;
src:
url("Inter-BoldItalic.woff2?v=3.12") format("woff2"),
url("Inter-BoldItalic.woff?v=3.12") format("woff");
}
@font-face {
font-family: "Inter";
font-style: normal;
font-weight: 800;
font-display: swap;
src:
url("Inter-ExtraBold.woff2?v=3.12") format("woff2"),
url("Inter-ExtraBold.woff?v=3.12") format("woff");
}
@font-face {
font-family: "Inter";
font-style: italic;
font-weight: 800;
font-display: swap;
src:
url("Inter-ExtraBoldItalic.woff2?v=3.12") format("woff2"),
url("Inter-ExtraBoldItalic.woff?v=3.12") format("woff");
}
@font-face {
font-family: "Inter";
font-style: normal;
font-weight: 900;
font-display: swap;
src:
url("Inter-Black.woff2?v=3.12") format("woff2"),
url("Inter-Black.woff?v=3.12") format("woff");
}
@font-face {
font-family: "Inter";
font-style: italic;
font-weight: 900;
font-display: swap;
src:
url("Inter-BlackItalic.woff2?v=3.12") format("woff2"),
url("Inter-BlackItalic.woff?v=3.12") format("woff");
}
|
2302_79757062/hrms
|
roster/src/assets/Inter/inter.css
|
CSS
|
agpl-3.0
| 3,844
|
<template>
<div class="flex mb-4">
<!-- Month Change -->
<Button icon="chevron-left" variant="ghost" @click="emit('addToMonth', -1)" />
<span class="px-1 w-24 text-center my-auto font-medium">
{{ props.firstOfMonth.format("MMM") }} {{ firstOfMonth.format("YYYY") }}
</span>
<Button icon="chevron-right" variant="ghost" @click="emit('addToMonth', 1)" />
<!-- Filters -->
<div class="ml-auto px-2 overflow-x-clip">
<div
class="ml-auto space-x-2 flex transition-all"
:class="showFilters ? 'w-full' : 'w-0 overflow-hidden'"
>
<div v-for="[key, value] of Object.entries(filters)" :key="key" class="w-40">
<FormControl
type="autocomplete"
:placeholder="toTitleCase(key)"
:options="value.options"
v-model="value.model"
:disabled="!value.options.length"
/>
</div>
<Button
icon="x"
@click="Object.values(filters).forEach((d) => (d.model = null))"
/>
</div>
</div>
<Button
:icon="showFilters ? 'chevrons-right' : 'chevrons-left'"
variant="ghost"
@click="showFilters = !showFilters"
/>
</div>
</template>
<script setup lang="ts">
import { ref, reactive, watch } from "vue";
import { FormControl, createListResource } from "frappe-ui";
import { Dayjs } from "dayjs";
import { raiseToast } from "../utils";
export type FilterField = "company" | "department" | "branch" | "designation" | "shift_type";
const props = defineProps<{
firstOfMonth: Dayjs;
}>();
const emit = defineEmits<{
(e: "addToMonth", change: number): void;
(e: "updateFilters", newFilters: { [K in FilterField]: string }): void;
}>();
const showFilters = ref(true);
const filters: {
[K in FilterField]: {
options: string[];
model?: { value: string } | null;
};
} = reactive({
company: { options: [], model: null },
department: { options: [], model: null },
branch: { options: [], model: null },
designation: { options: [], model: null },
shift_type: { options: [], model: null },
});
watch(
() => filters.company.model,
(val) => {
if (val?.value) return getFilterOptions("department", { company: val.value });
else {
filters.department.model = null;
filters.department.options = [];
}
},
);
watch(filters, (val) => {
const newFilters = {
company: val.company.model?.value || "",
department: val.department.model?.value || "",
branch: val.branch.model?.value || "",
designation: val.designation.model?.value || "",
shift_type: val.shift_type.model?.value || "",
};
emit("updateFilters", newFilters);
});
const toTitleCase = (str: string) =>
str
.split("_")
.map((s) => s.charAt(0).toUpperCase() + s.slice(1))
.join(" ");
// RESOURCES
const getFilterOptions = (field: FilterField, listFilters: { company?: string } = {}) => {
createListResource({
doctype: toTitleCase(field),
fields: ["name"],
filters: listFilters,
auto: true,
onSuccess: (data: { name: string }[]) => {
filters[field].model = { value: "" };
filters[field].options = data.map((item) => item.name);
},
onError(error: { messages: string[] }) {
raiseToast("error", error.messages[0]);
},
});
};
["company", "branch", "designation", "shift_type"].forEach((field) =>
getFilterOptions(field as FilterField),
);
</script>
|
2302_79757062/hrms
|
roster/src/components/MonthViewHeader.vue
|
Vue
|
agpl-3.0
| 3,242
|
<template>
<div
class="rounded-lg border overflow-auto max-h-[45rem]"
:class="loading && 'animate-pulse pointer-events-none'"
>
<table class="border-separate border-spacing-0">
<thead>
<tr class="sticky top-0 bg-white z-10">
<!-- Employee Search -->
<th class="p-2 border-b">
<Autocomplete
:options="employeeSearchOptions"
v-model="employeeSearch"
placeholder="Search Employee"
:multiple="true"
/>
</th>
<!-- Day/Date Row -->
<th
v-for="(day, idx) in daysOfMonth"
:key="idx"
class="font-medium border-b"
:class="{ 'border-l': idx }"
>
{{ day.dayName }} {{ dayjs(day.date).format("DD") }}
</th>
</tr>
</thead>
<tbody>
<tr v-for="(employee, rowIdx) in employees" :key="employee.name">
<!-- Employee Column -->
<td
v-if="
!employeeSearch?.length ||
employeeSearch?.some((item) => item.value === employee?.name)
"
class="px-2 py-7 z-[5]"
:class="{ 'border-t': rowIdx }"
>
<div class="flex" :class="!employee.designation && 'items-center'">
<Avatar
:label="employee.employee_name"
:image="employee.image"
size="2xl"
/>
<div class="flex flex-col ml-2 my-0.5 truncate">
<div class="truncate text-base">
{{ employee.employee_name }}
</div>
<div class="mt-auto text-xs text-gray-500 truncate">
{{ employee.designation }}
</div>
</div>
</div>
</td>
<!-- Events -->
<td
v-if="
!employeeSearch?.length ||
employeeSearch?.some((item) => item.value === employee?.name)
"
v-for="(day, colIdx) in daysOfMonth"
:key="colIdx"
class="p-1.5"
:class="{
'border-l': colIdx,
'border-t': rowIdx,
'align-top': events.data?.[employee.name]?.[day.date],
'align-middle bg-blue-50':
events.data?.[employee.name]?.[day.date]?.holiday,
'align-middle bg-pink-50':
events.data?.[employee.name]?.[day.date]?.leave,
'bg-gray-50':
dropCell.employee === employee.name &&
dropCell.date === day.date &&
!(
isHolidayOrLeave(employee.name, day.date) ||
hasSameShift(employee.name, day.date)
),
}"
@mouseenter="
hoveredCell.employee = employee.name;
hoveredCell.date = day.date;
"
@mouseleave="
hoveredCell.employee = '';
hoveredCell.date = '';
"
@dragover.prevent
@dragenter="
dropCell.employee = employee.name;
dropCell.date = day.date;
"
@drop="
() => {
if (
!(
isHolidayOrLeave(employee.name, day.date) ||
hasSameShift(employee.name, day.date)
)
) {
loading = true;
swapShift.submit();
}
}
"
>
<!-- Holiday -->
<div
v-if="events.data?.[employee.name]?.[day.date]?.holiday"
class="blocked-cell"
>
{{
events.data[employee.name][day.date].weekly_off
? "WO"
: events.data[employee.name][day.date].description
}}
</div>
<!-- Leave -->
<div
v-else-if="events.data?.[employee.name]?.[day.date]?.leave"
class="blocked-cell"
>
{{ events.data[employee.name][day.date].leave_type }}
</div>
<!-- Shifts -->
<div v-else class="flex flex-col space-y-1.5 translate-x-0 translate-y-0">
<div
v-for="shift in events.data?.[employee.name]?.[day.date]"
@mouseenter="
hoveredCell.shift = shift.name;
hoveredCell.shift_type = shift.shift_type;
hoveredCell.shift_status = shift.status;
"
@mouseleave="
hoveredCell.shift = '';
hoveredCell.shift_type = '';
hoveredCell.shift_status = '';
"
@dragenter="dropCell.shift = shift.name"
@dragleave="dropCell.shift = ''"
:draggable="true"
@dragstart="
(event) => {
if (event.dataTransfer) {
event.dataTransfer.effectAllowed = 'move';
}
}
"
@dragend="
if (!loading) dropCell = { employee: '', date: '', shift: '' };
"
class="rounded border-2 px-2 py-1 cursor-pointer"
:class="[
shift.status === 'Inactive' && 'border-dashed',
dropCell.employee === employee.name &&
dropCell.date === day.date &&
dropCell.shift === shift.name &&
!hasSameShift(employee.name, day.date) &&
'scale-105',
hoveredCell.employee === employee.name &&
hoveredCell.date === day.date &&
hoveredCell.shift === shift.name &&
dropCell.employee &&
'opacity-0',
]"
:style="{
borderColor:
hoveredCell.shift === shift.name &&
hoveredCell.date === day.date
? colors[shift.color as Color][300]
: colors[shift.color as Color][200],
backgroundColor:
shift.status === 'Active'
? colors[shift.color as Color][50]
: 'white',
}"
@click="
shiftAssignment = shift.name;
showShiftAssignmentDialog = true;
"
>
<div class="truncate mb-1 pointer-events-none">
{{ shift["shift_type"] }}
</div>
<div class="text-xs text-gray-500 pointer-events-none">
{{ shift["start_time"] }} - {{ shift["end_time"] }}
</div>
</div>
<!-- Add Shift -->
<Button
variant="outline"
icon="plus"
class="border-2 active:bg-white w-full"
:class="
hoveredCell.employee === employee.name &&
hoveredCell.date === day.date &&
!dropCell.employee
? 'visible'
: 'invisible'
"
@click="
shiftAssignment = '';
showShiftAssignmentDialog = true;
"
/>
</div>
</td>
</tr>
</tbody>
</table>
</div>
<ShiftAssignmentDialog
v-model="showShiftAssignmentDialog"
:isDialogOpen="showShiftAssignmentDialog"
:shiftAssignmentName="shiftAssignment"
:selectedCell="{ employee: hoveredCell.employee, date: hoveredCell.date }"
:employees="employees"
@fetchEvents="
events.fetch();
showShiftAssignmentDialog = false;
"
/>
</template>
<script setup lang="ts">
import { ref, computed, watch } from "vue";
import colors from "tailwindcss/colors";
import { Avatar, Autocomplete, createResource } from "frappe-ui";
import { Dayjs } from "dayjs";
import { dayjs, raiseToast } from "../utils";
import { EmployeeFilters } from "../views/MonthView.vue";
import ShiftAssignmentDialog from "./ShiftAssignmentDialog.vue";
interface Holiday {
holiday: string;
description: string;
weekly_off: 0 | 1;
}
interface HolidayWithDate extends Holiday {
holiday_date: string;
}
interface Leave {
leave: string;
leave_type: string;
}
interface LeaveApplication extends Leave {
from_date: string;
to_date: string;
}
type Color =
| "blue"
| "cyan"
| "fuchsia"
| "green"
| "lime"
| "orange"
| "pink"
| "red"
| "violet"
| "yellow";
type Shift = {
[K in "name" | "shift_type" | "status" | "start_time" | "end_time"]: string;
} & {
color: Color;
};
interface ShiftAssignment extends Shift {
start_date: string;
end_date: string;
}
type Events = Record<string, (HolidayWithDate | LeaveApplication | ShiftAssignment)[]>;
type MappedEvents = Record<string, Record<string, Holiday | Leave | Shift[]>>;
const props = defineProps<{
firstOfMonth: Dayjs;
employees: {
[K in "name" | "employee_name" | "designation" | "image"]: string;
}[];
employeeFilters: { [K in keyof EmployeeFilters]?: string };
shiftTypeFilter: string;
}>();
const loading = ref(true);
const employeeSearch = ref<{ value: string; label: string }[]>();
const shiftAssignment = ref<string>();
const showShiftAssignmentDialog = ref(false);
const hoveredCell = ref({ employee: "", date: "", shift: "", shift_type: "", shift_status: "" });
const dropCell = ref({ employee: "", date: "", shift: "" });
const daysOfMonth = computed(() => {
const daysOfMonth = [];
for (let i = 1; i <= props.firstOfMonth.daysInMonth(); i++) {
const date = props.firstOfMonth.date(i);
daysOfMonth.push({
dayName: date.format("ddd"),
date: date.format("YYYY-MM-DD"),
});
}
return daysOfMonth;
});
const employeeSearchOptions = computed(() => {
return props.employees.map((employee: { name: string; employee_name: string }) => ({
value: employee.name,
label: `${employee.name}: ${employee.employee_name}`,
}));
});
watch(
() => [props.firstOfMonth, props.employeeFilters, props.shiftTypeFilter],
() => {
loading.value = true;
events.fetch();
},
{ deep: true },
);
watch(loading, (val) => {
if (!val) dropCell.value = { employee: "", date: "", shift: "" };
});
const isHolidayOrLeave = (employee: string, day: string) =>
events.data?.[employee]?.[day]?.holiday || events.data?.[employee]?.[day]?.leave;
const hasSameShift = (employee: string, day: string) =>
Array.isArray(events.data?.[employee]?.[day]) &&
events.data?.[employee]?.[day].some(
(shift: Shift) =>
shift.shift_type === hoveredCell.value.shift_type &&
shift.status === hoveredCell.value.shift_status,
);
// RESOURCES
const events = createResource({
url: "hrms.api.roster.get_events",
auto: true,
makeParams() {
return {
month_start: props.firstOfMonth.format("YYYY-MM-DD"),
month_end: props.firstOfMonth.endOf("month").format("YYYY-MM-DD"),
employee_filters: props.employeeFilters,
shift_filters: props.shiftTypeFilter ? { shift_type: props.shiftTypeFilter } : {},
};
},
onSuccess() {
loading.value = false;
},
onError(error: { messages: string[] }) {
raiseToast("error", error.messages[0]);
},
transform: (data: Events) => {
const mappedEvents: MappedEvents = {};
for (const employee in data) {
mapEventsToDates(data, mappedEvents, employee);
}
return mappedEvents;
},
});
defineExpose({ events });
const swapShift = createResource({
url: "hrms.api.roster.swap_shift",
makeParams() {
return {
src_shift: hoveredCell.value.shift,
src_date: hoveredCell.value.date,
tgt_employee: dropCell.value.employee,
tgt_date: dropCell.value.date,
tgt_shift: dropCell.value.shift,
};
},
onSuccess: () => {
raiseToast("success", `Shift ${dropCell.value.shift ? "swapped" : "moved"} successfully!`);
events.fetch();
},
onError(error: { messages: string[] }) {
loading.value = false;
raiseToast("error", error.messages[0]);
},
});
const mapEventsToDates = (data: Events, mappedEvents: MappedEvents, employee: string) => {
mappedEvents[employee] = {};
for (let d = 1; d <= props.firstOfMonth.daysInMonth(); d++) {
const date = props.firstOfMonth.date(d);
const key = date.format("YYYY-MM-DD");
for (const event of Object.values(data[employee])) {
let result: Holiday | Leave | undefined;
if ("holiday" in event) {
result = handleHoliday(event, date);
if (result) {
mappedEvents[employee][key] = result;
break;
}
} else if ("leave" in event) {
result = handleLeave(event, date);
if (result) {
mappedEvents[employee][key] = result;
break;
}
} else handleShifts(event, date, mappedEvents, employee, key);
}
sortShiftsByStartTime(mappedEvents, employee, key);
}
};
const handleHoliday = (event: HolidayWithDate, date: Dayjs) => {
if (date.isSame(event.holiday_date)) {
return {
holiday: event.holiday,
description: event.description,
weekly_off: event.weekly_off,
};
}
};
const handleLeave = (event: LeaveApplication, date: Dayjs) => {
if (dayjs(event.from_date).isSameOrBefore(date) && dayjs(event.to_date).isSameOrAfter(date))
return {
leave: event.leave,
leave_type: event.leave_type,
};
};
const handleShifts = (
event: ShiftAssignment,
date: Dayjs,
mappedEvents: MappedEvents,
employee: string,
key: string,
) => {
if (
dayjs(event.start_date).isSameOrBefore(date) &&
(dayjs(event.end_date).isSameOrAfter(date) || !event.end_date)
) {
if (!Array.isArray(mappedEvents[employee][key])) mappedEvents[employee][key] = [];
mappedEvents[employee][key].push({
name: event.name,
shift_type: event.shift_type,
status: event.status,
start_time: event.start_time.split(":").slice(0, 2).join(":"),
end_time: event.end_time.split(":").slice(0, 2).join(":"),
color: event.color.toLowerCase() as Color,
});
}
};
const sortShiftsByStartTime = (mappedEvents: MappedEvents, employee: string, key: string) => {
if (Array.isArray(mappedEvents[employee][key]))
mappedEvents[employee][key].sort((a: Shift, b: Shift) =>
a.start_time.localeCompare(b.start_time),
);
};
</script>
<style>
th,
td {
@apply max-w-32 min-w-32;
font-size: 0.875rem;
}
th:first-child,
td:first-child {
@apply sticky left-0 max-w-64 min-w-64 bg-white border-r;
}
.blocked-cell {
@apply text-sm text-gray-500 text-center p-2;
}
</style>
|
2302_79757062/hrms
|
roster/src/components/MonthViewTable.vue
|
Vue
|
agpl-3.0
| 13,088
|
<template>
<div class="h-12 bg-white border-b px-12 flex items-center">
<a class="text-xl" href="/">Home</a>
<Dropdown
class="ml-auto"
:options="[
{
label: 'My Account',
onClick: () => goTo('/me'),
},
{
label: 'Log Out',
onClick: () => logout.submit(),
},
{
label: 'Switch to Desk',
onClick: () => goTo('/app'),
},
]"
>
<Avatar
:label="props.user?.full_name"
:image="props.user?.user_image"
size="lg"
class="cursor-pointer"
/>
</Dropdown>
</div>
</template>
<script setup lang="ts">
import { Dropdown, Avatar, createResource } from "frappe-ui";
import { raiseToast } from "../utils";
import type { User } from "../views/Home.vue";
const props = defineProps<{
user: User;
}>();
const goTo = (path: string) => {
window.location.href = path;
};
// RESOURCES
const logout = createResource({
url: "logout",
onSuccess() {
goTo("/login");
},
onError(error: { messages: string[] }) {
raiseToast("error", error.messages[0]);
},
});
</script>
|
2302_79757062/hrms
|
roster/src/components/NavBar.vue
|
Vue
|
agpl-3.0
| 1,029
|
<template>
<Dialog :options="{ title: dialog.title, size: '4xl' }">
<template #body-content>
<div class="grid grid-cols-2 gap-6">
<FormControl
type="autocomplete"
label="Employee"
v-model="form.employee"
:disabled="!!props.shiftAssignmentName"
:options="employees"
/>
<FormControl type="text" label="Company" v-model="form.company" :disabled="true" />
<FormControl
type="text"
label="Employee Name"
v-model="form.employee_name"
:disabled="true"
/>
<DatePicker
label="Start Date"
v-model="form.start_date"
:disabled="!!props.shiftAssignmentName"
/>
<FormControl
type="autocomplete"
label="Shift Type"
v-model="form.shift_type"
:disabled="!!props.shiftAssignmentName"
:options="shiftTypes.data"
/>
<DatePicker label="End Date" v-model="form.end_date" />
<FormControl
type="select"
:options="['Active', 'Inactive']"
label="Status"
v-model="form.status"
/>
<FormControl
type="text"
label="Department"
v-model="form.department"
:disabled="true"
/>
</div>
<!-- Schedule Settings -->
<div
v-if="(!props.shiftAssignmentName && showShiftScheduleSettings) || form.schedule"
class="mt-6 space-y-6"
>
<hr />
<h4 class="font-semibold">Schedule Settings</h4>
<div class="grid grid-cols-2 gap-6">
<div class="space-y-1.5">
<div class="text-xs text-gray-600">Repeat On Days</div>
<div
class="border rounded grid grid-flow-col h-7 justify-stretch overflow-clip"
>
						<div
							v-for="(isSelected, day) of repeatOnDays"
							:key="day"
							class="cursor-pointer flex flex-col"
:class="{
'border-r': day !== 'Sunday',
'bg-gray-100 text-gray-500': !isSelected,
'pointer-events-none': !!props.shiftAssignmentName,
}"
@click="repeatOnDays[day] = !repeatOnDays[day]"
>
<div class="text-center text-sm my-auto">
{{ day.substring(0, 3) }}
</div>
</div>
</div>
</div>
<FormControl
type="select"
:options="[
'Every Week',
'Every 2 Weeks',
'Every 3 Weeks',
'Every 4 Weeks',
]"
label="Frequency"
v-model="frequency"
:disabled="!!props.shiftAssignmentName"
/>
</div>
</div>
<Dialog
v-model="showDeleteDialog"
:options="{
title: deleteDialogOptions.title,
actions: [
{
label: 'Confirm',
variant: 'solid',
onClick: deleteDialogOptions.action,
},
],
}"
>
<template #body-content>
<div v-html="deleteDialogOptions.message" />
</template>
</Dialog>
</template>
<template #actions>
<div class="flex space-x-3 justify-end">
<Dropdown v-if="props.shiftAssignmentName" :options="actions">
<Button size="md" label="Delete" class="w-28 text-red-600" />
</Dropdown>
<Button
size="md"
variant="solid"
:disabled="dialog.actionDisabled"
class="w-28"
@click="dialog.action"
>
{{ dialog.button }}
</Button>
</div>
</template>
</Dialog>
</template>
<script setup lang="ts">
import { reactive, ref, computed, watch } from "vue";
import {
DatePicker,
Dialog,
FormControl,
Dropdown,
createDocumentResource,
createResource,
createListResource,
} from "frappe-ui";
import { dayjs, raiseToast } from "../utils";
type Status = "Active" | "Inactive";
type Form = {
[K in "company" | "employee_name" | "department" | "employee" | "shift_type"]:
| string
| { value: string; label?: string };
} & {
start_date: string;
end_date: string;
status: Status | { value: Status; label?: Status };
schedule?: string;
};
interface Props {
isDialogOpen: boolean;
shiftAssignmentName?: string;
selectedCell?: {
employee: string;
date: string;
};
employees?: {
name: string;
employee_name: string;
}[];
}
const props = withDefaults(defineProps<Props>(), {
employees: () => [],
});
const emit = defineEmits<{
(e: "fetchEvents"): void;
}>();
const formObject: Form = {
employee: "",
company: "",
employee_name: "",
start_date: "",
shift_type: "",
end_date: "",
status: "Active",
department: "",
schedule: "",
};
const repeatOnDaysObject = {
Monday: false,
Tuesday: false,
Wednesday: false,
Thursday: false,
Friday: false,
Saturday: false,
Sunday: false,
};
const form = reactive({ ...formObject });
const repeatOnDays = reactive({ ...repeatOnDaysObject });
const shiftAssignment = ref();
const selectedDate = ref();
const frequency = ref("Every Week");
const showDeleteDialog = ref(false);
const deleteDialogOptions = ref({ title: "", message: "", action: () => {} });
const dialog = computed(() => {
if (props.shiftAssignmentName)
return {
title: `[${selectedDate.value}] Shift Assignment ${props.shiftAssignmentName}`,
button: "Update",
			action: updateShiftAssignment,
actionDisabled:
form.status === shiftAssignment.value?.doc?.status &&
form.end_date === shiftAssignment.value?.doc?.end_date,
};
return {
title: "New Shift Assignment",
button: "Submit",
		action: createShiftAssignment,
actionDisabled: false,
};
});
const actions = computed(() => {
const options = [
{
label: `Shift for ${selectedDate.value}`,
onClick: () => {
deleteDialogOptions.value = {
title: "Delete Shift?",
message: `This will remove Shift Assignment: <a href='/app/shift-assignment/${props.shiftAssignmentName}' target='_blank'><u>${props.shiftAssignmentName}</u></a> scheduled for <b>${selectedDate.value}</b>.`,
action: () => deleteCurrentShift.submit(),
};
showDeleteDialog.value = true;
},
},
{
label: "All Consecutive Shifts",
onClick: () => {
deleteDialogOptions.value = {
title: "Delete Shift Assignment?",
message: `This will delete Shift Assignment: <a href='/app/shift-assignment/${
props.shiftAssignmentName
}' target='_blank'><u>${
props.shiftAssignmentName
}</u></a> (scheduled from <b>${form.start_date}</b>${
form.end_date ? ` to <b>${form.end_date}</b>` : ""
}).`,
action: async () => {
await shiftAssignment.value.setValue.submit({ docstatus: 2 });
shiftAssignments.delete.submit(props.shiftAssignmentName);
},
};
showDeleteDialog.value = true;
},
},
];
if (form.schedule)
options.push({
label: "Shift Assignment Schedule",
onClick: () => {
deleteDialogOptions.value = {
title: "Delete Shift Assignment Schedule?",
message: `This will delete Shift Assignment Schedule: <a href='/app/shift-assignment-schedule/${form.schedule}' target='_blank'><u>${form.schedule}</u></a> and all the shifts associated with it.`,
action: () => deleteShiftAssignmentSchedule.submit(),
};
showDeleteDialog.value = true;
},
});
return options;
});
const showShiftScheduleSettings = computed(() => {
if (!form.start_date || dayjs(form.end_date).diff(dayjs(form.start_date), "d") < 7) {
frequency.value = "Every Week";
return false;
}
return true;
});
const employees = computed(() => {
return props.employees.map((employee) => ({
label: `${employee.name}: ${employee.employee_name}`,
value: employee.name,
employee_name: employee.employee_name,
}));
});
watch(
() => props.isDialogOpen,
(val) => {
if (!val) return;
showDeleteDialog.value = false;
if (props.shiftAssignmentName) {
shiftAssignment.value = getShiftAssignment(props.shiftAssignmentName);
if (props.selectedCell) selectedDate.value = props.selectedCell.date;
} else {
Object.assign(form, formObject);
if (!props.selectedCell) return;
form.employee = { value: props.selectedCell.employee };
form.start_date = props.selectedCell.date;
form.end_date = props.selectedCell.date;
}
},
);
watch(
() => form.employee,
(val) => {
if (props.shiftAssignmentName) return;
if (val) {
employee.fetch();
} else {
form.employee_name = "";
form.company = "";
form.department = "";
}
},
);
watch(
() => form.start_date,
() => {
Object.assign(repeatOnDays, repeatOnDaysObject);
if (!form.start_date) return;
const day = dayjs(form.start_date).format("dddd");
repeatOnDays[day as keyof typeof repeatOnDays] = true;
},
{ immediate: true },
);
const updateShiftAssignment = () => {
shiftAssignment.value.setValue.submit({ status: form.status, end_date: form.end_date });
};
const createShiftAssignment = () => {
if (
showShiftScheduleSettings.value &&
(Object.values(repeatOnDays).some((day) => !day) || frequency.value !== "Every Week")
)
createShiftAssignmentSchedule.submit();
else insertShift.submit();
};
// RESOURCES
const getShiftAssignment = (name: string) =>
createDocumentResource({
doctype: "Shift Assignment",
name: name,
onSuccess: (data: Record<string, any>) => {
Object.keys(form).forEach((key) => {
form[key as keyof Form] = data[key];
});
if (form.schedule) getShiftAssignmentSchedule(form.schedule);
},
onError(error: { messages: string[] }) {
raiseToast("error", error.messages[0]);
},
setValue: {
onSuccess() {
raiseToast("success", "Shift Assignment updated successfully!");
emit("fetchEvents");
},
onError(error: { messages: string[] }) {
raiseToast("error", error.messages[0]);
},
},
});
const getShiftAssignmentSchedule = (name: string) =>
createDocumentResource({
doctype: "Shift Assignment Schedule",
name: name,
onSuccess: (data: Record<string, any>) => {
frequency.value = data.frequency;
const days = data.repeat_on_days.map(
(day: { day: keyof typeof repeatOnDays }) => day.day,
);
for (const day in repeatOnDays) {
repeatOnDays[day as keyof typeof repeatOnDays] = days.includes(day);
}
},
onError(error: { messages: string[] }) {
raiseToast("error", error.messages[0]);
},
});
const employee = createResource({
url: "hrms.api.roster.get_values",
makeParams() {
const employee = (form.employee as { value: string }).value;
return {
doctype: "Employee",
name: employee,
fields: ["employee_name", "company", "department"],
};
},
onSuccess: (data: { [K in "employee_name" | "company" | "department"]: string }) => {
form.employee_name = data.employee_name;
form.company = data.company;
form.department = data.department;
},
onError(error: { messages: string[] }) {
raiseToast("error", error.messages[0]);
},
});
const shiftTypes = createListResource({
doctype: "Shift Type",
fields: ["name"],
auto: true,
transform: (data: { name: string }[]) => data.map((shiftType) => shiftType.name),
});
const shiftAssignments = createListResource({
doctype: "Shift Assignment",
insert: {
onSuccess() {
raiseToast("success", "Shift Assignment created successfully!");
emit("fetchEvents");
},
onError(error: { messages: string[] }) {
raiseToast("error", error.messages[0]);
},
},
delete: {
onSuccess() {
raiseToast("success", "Shift Assignment deleted successfully!");
emit("fetchEvents");
},
onError(error: { messages: string[] }) {
raiseToast("error", error.messages[0]);
},
},
});
const insertShift = createResource({
url: "hrms.api.roster.insert_shift",
makeParams() {
return {
employee: (form.employee as { value: string }).value,
shift_type: (form.shift_type as { value: string }).value,
company: form.company,
status: form.status,
start_date: form.start_date,
end_date: form.end_date,
};
},
onSuccess: () => {
raiseToast("success", "Shift Assignment created successfully!");
emit("fetchEvents");
},
onError(error: { messages: string[] }) {
raiseToast("error", error.messages[0]);
},
});
const deleteCurrentShift = createResource({
url: "hrms.api.roster.break_shift",
makeParams() {
return {
assignment: props.shiftAssignmentName,
date: selectedDate.value,
};
},
onSuccess: () => {
raiseToast("success", "Shift deleted successfully!");
emit("fetchEvents");
},
onError(error: { messages: string[] }) {
raiseToast("error", error.messages[0]);
},
});
const createShiftAssignmentSchedule = createResource({
url: "hrms.api.roster.create_shift_assignment_schedule",
makeParams() {
return {
employee: (form.employee as { value: string }).value,
shift_type: (form.shift_type as { value: string }).value,
company: form.company,
status: form.status,
start_date: form.start_date,
end_date: form.end_date,
repeat_on_days: Object.keys(repeatOnDays).filter(
(day) => repeatOnDays[day as keyof typeof repeatOnDays],
),
frequency: frequency.value,
};
},
onSuccess: () => {
raiseToast("success", "Shift Assignment Schedule created successfully!");
emit("fetchEvents");
},
onError(error: { messages: string[] }) {
raiseToast("error", error.messages[0]);
},
});
const deleteShiftAssignmentSchedule = createResource({
url: "hrms.api.roster.delete_shift_assignment_schedule",
makeParams() {
return { schedule: form.schedule };
},
onSuccess: () => {
raiseToast("success", "Shift Assignment Schedule deleted successfully!");
emit("fetchEvents");
},
onError(error: { messages: string[] }) {
raiseToast("error", error.messages[0]);
},
});
</script>
|
2302_79757062/hrms
|
roster/src/components/ShiftAssignmentDialog.vue
|
Vue
|
agpl-3.0
| 13,233
|
@import "./assets/Inter/inter.css";
@import "frappe-ui/src/style.css";
|
2302_79757062/hrms
|
roster/src/index.css
|
CSS
|
agpl-3.0
| 71
|
import "./index.css";
import { createApp } from "vue";
import router from "./router";
import App from "./App.vue";
import { Button, setConfig, frappeRequest, resourcesPlugin } from "frappe-ui";
const app = createApp(App);
setConfig("resourceFetcher", frappeRequest);
app.use(router);
app.use(resourcesPlugin);
app.component("Button", Button);
app.mount("#app");
|
2302_79757062/hrms
|
roster/src/main.ts
|
TypeScript
|
agpl-3.0
| 368
|
import { createRouter, createWebHistory } from "vue-router";
const routes = [
{
path: "/",
name: "Home",
component: () => import("./views/Home.vue"),
},
];
const router = createRouter({
history: createWebHistory("/hr/roster"),
routes,
});
export default router;
|
2302_79757062/hrms
|
roster/src/router.ts
|
TypeScript
|
agpl-3.0
| 275
|
import dayjs from "dayjs";
import updateLocale from "dayjs/plugin/updateLocale";
import localizedFormat from "dayjs/plugin/localizedFormat";
import isSameOrBefore from "dayjs/plugin/isSameOrBefore";
import isSameOrAfter from "dayjs/plugin/isSameOrAfter";
dayjs.extend(updateLocale);
dayjs.extend(localizedFormat);
dayjs.extend(isSameOrBefore);
dayjs.extend(isSameOrAfter);
export default dayjs;
|
2302_79757062/hrms
|
roster/src/utils/dayjs.ts
|
TypeScript
|
agpl-3.0
| 397
|
import { toast } from "frappe-ui";
export { default as dayjs } from "./dayjs";
export const raiseToast = (type: "success" | "error", message: string) => {
if (type === "success")
return toast({
title: "Success",
text: message,
icon: "check-circle",
position: "bottom-right",
iconClasses: "text-green-500",
});
const div = document.createElement("div");
div.innerHTML = message;
// strip html tags
const text =
div.textContent || div.innerText || "Failed to perform action. Please try again later.";
toast({
title: "Error",
text: text,
icon: "alert-circle",
position: "bottom-right",
iconClasses: "text-red-500",
timeout: 7,
});
};
|
2302_79757062/hrms
|
roster/src/utils/index.ts
|
TypeScript
|
agpl-3.0
| 674
|
<template>
<div v-if="user.data" class="bg-gray-50 min-h-screen">
<NavBar :user="user.data" />
<MonthView />
<Toasts />
</div>
</template>
<script setup lang="ts">
import { Toasts, createResource } from "frappe-ui";
import router from "../router";
import NavBar from "../components/NavBar.vue";
import MonthView from "./MonthView.vue";
export type User = {
[K in "name" | "first_name" | "full_name" | "user_image"]: string;
} & {
roles: string[];
};
// RESOURCES
const user = createResource({
url: "hrms.api.get_current_user_info",
auto: true,
onError() {
window.location.href = "/login?redirect-to=%2Fhr%2Froster";
},
});
</script>
|
2302_79757062/hrms
|
roster/src/views/Home.vue
|
Vue
|
agpl-3.0
| 654
|
<template>
<div class="px-12 py-6 space-y-6">
<div class="flex items-center">
<FeatherIcon name="calendar" class="h-7 w-7 text-gray-500 mr-3" />
<span class="font-semibold text-2xl mr-1">Month View</span>
<Dropdown
:options="[
{
label: 'Shift Assignment',
onClick: () => {
showShiftAssignmentDialog = true;
},
},
]"
:button="{
label: 'Create',
variant: 'solid',
iconRight: 'chevron-down',
size: 'md',
}"
class="ml-auto"
/>
</div>
<div class="bg-white rounded-lg border p-4">
<MonthViewHeader
:firstOfMonth="firstOfMonth"
@updateFilters="updateFilters"
@addToMonth="addToMonth"
/>
<MonthViewTable
ref="monthViewTable"
:firstOfMonth="firstOfMonth"
:employees="employees.data || []"
:employeeFilters="employeeFilters"
:shiftTypeFilter="shiftTypeFilter"
/>
</div>
</div>
<ShiftAssignmentDialog
v-model="showShiftAssignmentDialog"
:isDialogOpen="showShiftAssignmentDialog"
:employees="employees.data"
@fetchEvents="
monthViewTable?.events.fetch();
showShiftAssignmentDialog = false;
"
/>
</template>
<script setup lang="ts">
import { ref, reactive } from "vue";
import { Dropdown, FeatherIcon, createListResource } from "frappe-ui";
import { dayjs, raiseToast } from "../utils";
import MonthViewTable from "../components/MonthViewTable.vue";
import MonthViewHeader from "../components/MonthViewHeader.vue";
import ShiftAssignmentDialog from "../components/ShiftAssignmentDialog.vue";
export type EmployeeFilters = {
[K in "status" | "company" | "department" | "branch" | "designation"]?: string;
};
const monthViewTable = ref<InstanceType<typeof MonthViewTable>>();
const showShiftAssignmentDialog = ref(false);
const firstOfMonth = ref(dayjs().date(1).startOf("D"));
const shiftTypeFilter = ref("");
const employeeFilters = reactive<EmployeeFilters>({
status: "Active",
});
const addToMonth = (change: number) => {
firstOfMonth.value = firstOfMonth.value.add(change, "M");
};
const updateFilters = (newFilters: EmployeeFilters & { shift_type: string }) => {
let employeeUpdated = false;
(Object.entries(newFilters) as [keyof EmployeeFilters | "shift_type", string][]).forEach(
([key, value]) => {
if (key === "shift_type") {
shiftTypeFilter.value = value;
return;
}
if (value) employeeFilters[key] = value;
else delete employeeFilters[key];
employeeUpdated = true;
},
);
if (employeeUpdated) employees.fetch();
};
// RESOURCES
const employees = createListResource({
doctype: "Employee",
fields: ["name", "employee_name", "designation", "image"],
filters: employeeFilters,
auto: true,
onError(error: { messages: string[] }) {
raiseToast("error", error.messages[0]);
},
});
</script>
|
2302_79757062/hrms
|
roster/src/views/MonthView.vue
|
Vue
|
agpl-3.0
| 2,788
|
module.exports = {
presets: [require("frappe-ui/src/utils/tailwind.config")],
content: [
"./index.html",
"./src/**/*.{vue,js,ts,jsx,tsx}",
"./node_modules/frappe-ui/src/components/**/*.{vue,js,ts,jsx,tsx}",
"../node_modules/frappe-ui/src/components/**/*.{vue,js,ts,jsx,tsx}",
],
theme: {
extend: {},
},
plugins: [],
};
|
2302_79757062/hrms
|
roster/tailwind.config.js
|
JavaScript
|
agpl-3.0
| 335
|
import { defineConfig } from "vite";
import vue from "@vitejs/plugin-vue";
import fs from "fs";
import path from "path";
// https://vitejs.dev/config/
export default defineConfig({
plugins: [vue()],
server: {
port: 8081,
proxy: getProxyOptions(),
},
resolve: {
alias: {
"@": path.resolve(__dirname, "src"),
},
},
build: {
outDir: `../hrms/public/roster`,
emptyOutDir: true,
target: "es2015",
commonjsOptions: {
include: [/tailwind.config.js/, /node_modules/],
},
},
optimizeDeps: {
include: [
"frappe-ui > feather-icons",
"showdown",
"tailwind.config.js",
"engine.io-client",
],
},
});
function getProxyOptions() {
const config = getCommonSiteConfig();
const webserver_port = config ? config.webserver_port : 8000;
if (!config) {
console.log("No common_site_config.json found, using default port 8000");
}
return {
"^/(app|login|api|assets|files|private)": {
target: `http://127.0.0.1:${webserver_port}`,
ws: true,
router: function (req) {
const site_name = req.headers.host.split(":")[0];
console.log(`Proxying ${req.url} to ${site_name}:${webserver_port}`);
return `http://${site_name}:${webserver_port}`;
},
},
};
}
function getCommonSiteConfig() {
let currentDir = path.resolve(".");
// traverse up till we find frappe-bench with sites directory
while (currentDir !== "/") {
if (
fs.existsSync(path.join(currentDir, "sites")) &&
fs.existsSync(path.join(currentDir, "apps"))
) {
let configPath = path.join(currentDir, "sites", "common_site_config.json");
if (fs.existsSync(configPath)) {
return JSON.parse(fs.readFileSync(configPath));
}
return null;
}
currentDir = path.resolve(currentDir, "..");
}
return null;
}
|
2302_79757062/hrms
|
roster/vite.config.js
|
JavaScript
|
agpl-3.0
| 1,741
|
FROM python:3.10-slim
WORKDIR /app
# https://github.com/aptible/supercronic
ARG TARGETARCH
ENV SUPERCRONIC_VERSION=v0.2.34
RUN set -ex && \
apt-get update && \
apt-get install -y --no-install-recommends curl ca-certificates && \
case ${TARGETARCH} in \
amd64) \
export SUPERCRONIC_URL=https://github.com/aptible/supercronic/releases/download/${SUPERCRONIC_VERSION}/supercronic-linux-amd64; \
export SUPERCRONIC_SHA1SUM=e8631edc1775000d119b70fd40339a7238eece14; \
export SUPERCRONIC=supercronic-linux-amd64; \
;; \
arm64) \
export SUPERCRONIC_URL=https://github.com/aptible/supercronic/releases/download/${SUPERCRONIC_VERSION}/supercronic-linux-arm64; \
export SUPERCRONIC_SHA1SUM=4ab6343b52bf9da592e8b4bb7ae6eb5a8e21b71e; \
export SUPERCRONIC=supercronic-linux-arm64; \
;; \
*) \
echo "Unsupported architecture: ${TARGETARCH}"; \
exit 1; \
;; \
esac && \
echo "Downloading supercronic for ${TARGETARCH} from ${SUPERCRONIC_URL}" && \
# 添加重试机制和超时设置
for i in 1 2 3 4 5; do \
echo "Download attempt $i/5"; \
if curl --fail --silent --show-error --location --retry 3 --retry-delay 2 --connect-timeout 30 --max-time 120 -o "$SUPERCRONIC" "$SUPERCRONIC_URL"; then \
echo "Download successful"; \
break; \
else \
echo "Download attempt $i failed, exit code: $?"; \
if [ $i -eq 5 ]; then \
echo "All download attempts failed"; \
exit 1; \
fi; \
sleep $((i * 2)); \
fi; \
done && \
echo "${SUPERCRONIC_SHA1SUM} ${SUPERCRONIC}" | sha1sum -c - && \
chmod +x "$SUPERCRONIC" && \
mv "$SUPERCRONIC" "/usr/local/bin/${SUPERCRONIC}" && \
ln -s "/usr/local/bin/${SUPERCRONIC}" /usr/local/bin/supercronic && \
# 验证安装
supercronic -version && \
apt-get remove -y curl && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY main.py .
COPY docker/manage.py .
# 复制 entrypoint.sh 并强制转换为 LF 格式
COPY docker/entrypoint.sh /entrypoint.sh.tmp
RUN sed -i 's/\r$//' /entrypoint.sh.tmp && \
mv /entrypoint.sh.tmp /entrypoint.sh && \
chmod +x /entrypoint.sh && \
chmod +x manage.py && \
mkdir -p /app/config /app/output
ENV PYTHONUNBUFFERED=1 \
CONFIG_PATH=/app/config/config.yaml \
FREQUENCY_WORDS_PATH=/app/config/frequency_words.txt
ENTRYPOINT ["/entrypoint.sh"]
|
2302_81331056/TrendRadar
|
docker/Dockerfile
|
Dockerfile
|
agpl-3.0
| 2,463
|
#!/bin/bash
set -e
# 检查配置文件
if [ ! -f "/app/config/config.yaml" ] || [ ! -f "/app/config/frequency_words.txt" ]; then
echo "❌ 配置文件缺失"
exit 1
fi
# 保存环境变量
env >> /etc/environment
case "${RUN_MODE:-cron}" in
"once")
echo "🔄 单次执行"
exec /usr/local/bin/python main.py
;;
"cron")
# 生成 crontab
echo "${CRON_SCHEDULE:-*/30 * * * *} cd /app && /usr/local/bin/python main.py" > /tmp/crontab
echo "📅 生成的crontab内容:"
cat /tmp/crontab
if ! /usr/local/bin/supercronic -test /tmp/crontab; then
echo "❌ crontab格式验证失败"
exit 1
fi
# 立即执行一次(如果配置了)
if [ "${IMMEDIATE_RUN:-false}" = "true" ]; then
echo "▶️ 立即执行一次"
/usr/local/bin/python main.py
fi
echo "⏰ 启动supercronic: ${CRON_SCHEDULE:-*/30 * * * *}"
echo "🎯 supercronic 将作为 PID 1 运行"
exec /usr/local/bin/supercronic -passthrough-logs /tmp/crontab
;;
*)
exec "$@"
;;
esac
|
2302_81331056/TrendRadar
|
docker/entrypoint.sh
|
Shell
|
agpl-3.0
| 1,068
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
新闻爬虫容器管理工具 - supercronic
"""
import os
import sys
import subprocess
import time
from pathlib import Path
def run_command(cmd, shell=True, capture_output=True):
"""执行系统命令"""
try:
result = subprocess.run(
cmd, shell=shell, capture_output=capture_output, text=True
)
return result.returncode == 0, result.stdout, result.stderr
except Exception as e:
return False, "", str(e)
def manual_run():
"""手动执行一次爬虫"""
print("🔄 手动执行爬虫...")
try:
result = subprocess.run(
["python", "main.py"], cwd="/app", capture_output=False, text=True
)
if result.returncode == 0:
print("✅ 执行完成")
else:
print(f"❌ 执行失败,退出码: {result.returncode}")
except Exception as e:
print(f"❌ 执行出错: {e}")
def parse_cron_schedule(cron_expr):
"""解析cron表达式并返回人类可读的描述"""
if not cron_expr or cron_expr == "未设置":
return "未设置"
try:
parts = cron_expr.strip().split()
if len(parts) != 5:
return f"原始表达式: {cron_expr}"
minute, hour, day, month, weekday = parts
# 分析分钟
if minute == "*":
minute_desc = "每分钟"
elif minute.startswith("*/"):
interval = minute[2:]
minute_desc = f"每{interval}分钟"
elif "," in minute:
minute_desc = f"在第{minute}分钟"
else:
minute_desc = f"在第{minute}分钟"
# 分析小时
if hour == "*":
hour_desc = "每小时"
elif hour.startswith("*/"):
interval = hour[2:]
hour_desc = f"每{interval}小时"
elif "," in hour:
hour_desc = f"在{hour}点"
else:
hour_desc = f"在{hour}点"
# 分析日期
if day == "*":
day_desc = "每天"
elif day.startswith("*/"):
interval = day[2:]
day_desc = f"每{interval}天"
else:
day_desc = f"每月{day}号"
# 分析月份
if month == "*":
month_desc = "每月"
else:
month_desc = f"在{month}月"
# 分析星期
weekday_names = {
"0": "周日", "1": "周一", "2": "周二", "3": "周三",
"4": "周四", "5": "周五", "6": "周六", "7": "周日"
}
if weekday == "*":
weekday_desc = ""
else:
weekday_desc = f"在{weekday_names.get(weekday, weekday)}"
# 组合描述
if minute.startswith("*/") and hour == "*" and day == "*" and month == "*" and weekday == "*":
# 简单的间隔模式,如 */30 * * * *
return f"每{minute[2:]}分钟执行一次"
elif hour != "*" and minute != "*" and day == "*" and month == "*" and weekday == "*":
# 每天特定时间,如 0 9 * * *
return f"每天{hour}:{minute.zfill(2)}执行"
elif weekday != "*" and day == "*":
# 每周特定时间
return f"{weekday_desc}{hour}:{minute.zfill(2)}执行"
else:
# 复杂模式,显示详细信息
desc_parts = [part for part in [month_desc, day_desc, weekday_desc, hour_desc, minute_desc] if part and part != "每月" and part != "每天" and part != "每小时"]
if desc_parts:
return " ".join(desc_parts) + "执行"
else:
return f"复杂表达式: {cron_expr}"
except Exception as e:
return f"解析失败: {cron_expr}"
def show_status():
"""显示容器状态"""
print("📊 容器状态:")
# 检查 PID 1 状态
supercronic_is_pid1 = False
pid1_cmdline = ""
try:
with open('/proc/1/cmdline', 'r') as f:
pid1_cmdline = f.read().replace('\x00', ' ').strip()
print(f" 🔍 PID 1 进程: {pid1_cmdline}")
if "supercronic" in pid1_cmdline.lower():
print(" ✅ supercronic 正确运行为 PID 1")
supercronic_is_pid1 = True
else:
print(" ❌ PID 1 不是 supercronic")
print(f" 📋 实际的 PID 1: {pid1_cmdline}")
except Exception as e:
print(f" ❌ 无法读取 PID 1 信息: {e}")
# 检查环境变量
cron_schedule = os.environ.get("CRON_SCHEDULE", "未设置")
run_mode = os.environ.get("RUN_MODE", "未设置")
immediate_run = os.environ.get("IMMEDIATE_RUN", "未设置")
print(f" ⚙️ 运行配置:")
print(f" CRON_SCHEDULE: {cron_schedule}")
# 解析并显示cron表达式的含义
cron_description = parse_cron_schedule(cron_schedule)
print(f" ⏰ 执行频率: {cron_description}")
print(f" RUN_MODE: {run_mode}")
print(f" IMMEDIATE_RUN: {immediate_run}")
# 检查配置文件
config_files = ["/app/config/config.yaml", "/app/config/frequency_words.txt"]
print(" 📁 配置文件:")
for file_path in config_files:
if Path(file_path).exists():
print(f" ✅ {Path(file_path).name}")
else:
print(f" ❌ {Path(file_path).name} 缺失")
# 检查关键文件
key_files = [
("/usr/local/bin/supercronic-linux-amd64", "supercronic二进制文件"),
("/usr/local/bin/supercronic", "supercronic软链接"),
("/tmp/crontab", "crontab文件"),
("/entrypoint.sh", "启动脚本")
]
print(" 📂 关键文件检查:")
for file_path, description in key_files:
if Path(file_path).exists():
print(f" ✅ {description}: 存在")
# 对于crontab文件,显示内容
if file_path == "/tmp/crontab":
try:
with open(file_path, 'r') as f:
crontab_content = f.read().strip()
print(f" 内容: {crontab_content}")
except:
pass
else:
print(f" ❌ {description}: 不存在")
# 检查容器运行时间
print(" ⏱️ 容器时间信息:")
try:
# 检查 PID 1 的启动时间
with open('/proc/1/stat', 'r') as f:
stat_content = f.read().strip().split()
if len(stat_content) >= 22:
# starttime 是第22个字段(索引21)
starttime_ticks = int(stat_content[21])
# 读取系统启动时间
with open('/proc/stat', 'r') as stat_f:
for line in stat_f:
if line.startswith('btime'):
boot_time = int(line.split()[1])
break
else:
boot_time = 0
# 读取系统时钟频率
clock_ticks = os.sysconf(os.sysconf_names['SC_CLK_TCK'])
if boot_time > 0:
pid1_start_time = boot_time + (starttime_ticks / clock_ticks)
current_time = time.time()
uptime_seconds = int(current_time - pid1_start_time)
uptime_minutes = uptime_seconds // 60
uptime_hours = uptime_minutes // 60
if uptime_hours > 0:
print(f" PID 1 运行时间: {uptime_hours} 小时 {uptime_minutes % 60} 分钟")
else:
print(f" PID 1 运行时间: {uptime_minutes} 分钟 ({uptime_seconds} 秒)")
else:
print(f" PID 1 运行时间: 无法精确计算")
else:
print(" ❌ 无法解析 PID 1 统计信息")
except Exception as e:
print(f" ❌ 时间检查失败: {e}")
# 状态总结和建议
print(" 📊 状态总结:")
if supercronic_is_pid1:
print(" ✅ supercronic 正确运行为 PID 1")
print(" ✅ 定时任务应该正常工作")
# 显示当前的调度信息
if cron_schedule != "未设置":
print(f" ⏰ 当前调度: {cron_description}")
# 提供一些常见的调度建议
if "分钟" in cron_description and "每30分钟" not in cron_description and "每60分钟" not in cron_description:
print(" 💡 频繁执行模式,适合实时监控")
elif "小时" in cron_description:
print(" 💡 按小时执行模式,适合定期汇总")
elif "天" in cron_description:
print(" 💡 每日执行模式,适合日报生成")
print(" 💡 如果定时任务不执行,检查:")
print(" • crontab 格式是否正确")
print(" • 时区设置是否正确")
print(" • 应用程序是否有错误")
else:
print(" ❌ supercronic 状态异常")
if pid1_cmdline:
print(f" 📋 当前 PID 1: {pid1_cmdline}")
print(" 💡 建议操作:")
print(" • 重启容器: docker restart trend-radar")
print(" • 检查容器日志: docker logs trend-radar")
# 显示日志检查建议
print(" 📋 运行状态检查:")
print(" • 查看完整容器日志: docker logs trend-radar")
print(" • 查看实时日志: docker logs -f trend-radar")
print(" • 手动执行测试: python manage.py run")
print(" • 重启容器服务: docker restart trend-radar")
def show_config():
"""显示当前配置"""
print("⚙️ 当前配置:")
env_vars = [
"CRON_SCHEDULE",
"RUN_MODE",
"IMMEDIATE_RUN",
"FEISHU_WEBHOOK_URL",
"DINGTALK_WEBHOOK_URL",
"WEWORK_WEBHOOK_URL",
"TELEGRAM_BOT_TOKEN",
"TELEGRAM_CHAT_ID",
"CONFIG_PATH",
"FREQUENCY_WORDS_PATH",
]
for var in env_vars:
value = os.environ.get(var, "未设置")
# 隐藏敏感信息
if any(sensitive in var for sensitive in ["WEBHOOK", "TOKEN", "KEY"]):
if value and value != "未设置":
masked_value = value[:10] + "***" if len(value) > 10 else "***"
print(f" {var}: {masked_value}")
else:
print(f" {var}: {value}")
else:
print(f" {var}: {value}")
crontab_file = "/tmp/crontab"
if Path(crontab_file).exists():
print(" 📅 Crontab内容:")
try:
with open(crontab_file, "r") as f:
content = f.read().strip()
print(f" {content}")
except Exception as e:
print(f" 读取失败: {e}")
else:
print(" 📅 Crontab文件不存在")
def show_files():
"""显示输出文件"""
print("📁 输出文件:")
output_dir = Path("/app/output")
if not output_dir.exists():
print(" 📭 输出目录不存在")
return
# 显示最近的文件
date_dirs = sorted([d for d in output_dir.iterdir() if d.is_dir()], reverse=True)
if not date_dirs:
print(" 📭 输出目录为空")
return
# 显示最近2天的文件
for date_dir in date_dirs[:2]:
print(f" 📅 {date_dir.name}:")
for subdir in ["html", "txt"]:
sub_path = date_dir / subdir
if sub_path.exists():
files = list(sub_path.glob("*"))
if files:
recent_files = sorted(
files, key=lambda x: x.stat().st_mtime, reverse=True
)[:3]
print(f" 📂 {subdir}: {len(files)} 个文件")
for file in recent_files:
mtime = time.ctime(file.stat().st_mtime)
size_kb = file.stat().st_size // 1024
print(
f" 📄 {file.name} ({size_kb}KB, {mtime.split()[3][:5]})"
)
else:
print(f" 📂 {subdir}: 空")
def show_logs():
"""显示实时日志"""
print("📋 实时日志 (按 Ctrl+C 退出):")
print("💡 提示: 这将显示 PID 1 进程的输出")
try:
# 尝试多种方法查看日志
log_files = [
"/proc/1/fd/1", # PID 1 的标准输出
"/proc/1/fd/2", # PID 1 的标准错误
]
for log_file in log_files:
if Path(log_file).exists():
print(f"📄 尝试读取: {log_file}")
subprocess.run(["tail", "-f", log_file], check=True)
break
else:
print("📋 无法找到标准日志文件,建议使用: docker logs trend-radar")
except KeyboardInterrupt:
print("\n👋 退出日志查看")
except Exception as e:
print(f"❌ 查看日志失败: {e}")
print("💡 建议使用: docker logs trend-radar")
def restart_supercronic():
"""重启supercronic进程"""
print("🔄 重启supercronic...")
print("⚠️ 注意: supercronic 是 PID 1,无法直接重启")
# 检查当前 PID 1
try:
with open('/proc/1/cmdline', 'r') as f:
pid1_cmdline = f.read().replace('\x00', ' ').strip()
print(f" 🔍 当前 PID 1: {pid1_cmdline}")
if "supercronic" in pid1_cmdline.lower():
print(" ✅ PID 1 是 supercronic")
print(" 💡 要重启 supercronic,需要重启整个容器:")
print(" docker restart trend-radar")
else:
print(" ❌ PID 1 不是 supercronic,这是异常状态")
print(" 💡 建议重启容器以修复问题:")
print(" docker restart trend-radar")
except Exception as e:
print(f" ❌ 无法检查 PID 1: {e}")
print(" 💡 建议重启容器: docker restart trend-radar")
def show_help():
"""显示帮助信息"""
help_text = """
🐳 TrendRadar 容器管理工具
📋 命令列表:
run - 手动执行一次爬虫
status - 显示容器运行状态
config - 显示当前配置
files - 显示输出文件
logs - 实时查看日志
restart - 重启说明
help - 显示此帮助
📖 使用示例:
# 在容器中执行
python manage.py run
python manage.py status
python manage.py logs
# 在宿主机执行
docker exec -it trend-radar python manage.py run
docker exec -it trend-radar python manage.py status
docker logs trend-radar
💡 常用操作指南:
1. 检查运行状态: status
- 查看 supercronic 是否为 PID 1
- 检查配置文件和关键文件
- 查看 cron 调度设置
2. 手动执行测试: run
- 立即执行一次新闻爬取
- 测试程序是否正常工作
3. 查看日志: logs
- 实时监控运行情况
- 也可使用: docker logs trend-radar
4. 重启服务: restart
- 由于 supercronic 是 PID 1,需要重启整个容器
- 使用: docker restart trend-radar
"""
print(help_text)
def main():
if len(sys.argv) < 2:
show_help()
return
command = sys.argv[1]
commands = {
"run": manual_run,
"status": show_status,
"config": show_config,
"files": show_files,
"logs": show_logs,
"restart": restart_supercronic,
"help": show_help,
}
if command in commands:
try:
commands[command]()
except KeyboardInterrupt:
print("\n👋 操作已取消")
except Exception as e:
print(f"❌ 执行出错: {e}")
else:
print(f"❌ 未知命令: {command}")
print("运行 'python manage.py help' 查看可用命令")
if __name__ == "__main__":
main()
|
2302_81331056/TrendRadar
|
docker/manage.py
|
Python
|
agpl-3.0
| 16,239
|
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>热点新闻分析</title>
<script src="https://cdnjs.cloudflare.com/ajax/libs/html2canvas/1.4.1/html2canvas.min.js" integrity="sha512-BNaRQnYJYiPSqHHDb58B0yaPfCu+Wgds8Gp/gU33kqBtgNS4tSPHuGibyoeqMV/TJlSKda6FXzoEyYGjTe+vXA==" crossorigin="anonymous" referrerpolicy="no-referrer"></script>
<style>
* {
box-sizing: border-box;
}
body {
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", system-ui, sans-serif;
margin: 0;
padding: 16px;
background: #fafafa;
color: #333;
line-height: 1.5;
}
.container {
max-width: 600px;
margin: 0 auto;
background: white;
border-radius: 12px;
overflow: hidden;
box-shadow: 0 2px 16px rgba(0, 0, 0, 0.06);
}
.header {
background: linear-gradient(135deg, #4f46e5 0%, #7c3aed 100%);
color: white;
padding: 32px 24px;
text-align: center;
position: relative;
}
.save-buttons {
position: absolute;
top: 16px;
right: 16px;
display: flex;
gap: 8px;
}
.save-btn {
background: rgba(255, 255, 255, 0.2);
border: 1px solid rgba(255, 255, 255, 0.3);
color: white;
padding: 8px 16px;
border-radius: 6px;
cursor: pointer;
font-size: 13px;
font-weight: 500;
transition: all 0.2s ease;
backdrop-filter: blur(10px);
white-space: nowrap;
}
.save-btn:hover {
background: rgba(255, 255, 255, 0.3);
border-color: rgba(255, 255, 255, 0.5);
transform: translateY(-1px);
}
.save-btn:active {
transform: translateY(0);
}
.save-btn:disabled {
opacity: 0.6;
cursor: not-allowed;
}
.header-title {
font-size: 22px;
font-weight: 700;
margin: 0 0 20px 0;
}
.header-info {
display: grid;
grid-template-columns: 1fr 1fr;
gap: 16px;
font-size: 14px;
opacity: 0.95;
}
.info-item {
text-align: center;
}
.info-label {
display: block;
font-size: 12px;
opacity: 0.8;
margin-bottom: 4px;
}
.info-value {
font-weight: 600;
font-size: 16px;
}
.content {
padding: 24px;
}
.word-group {
margin-bottom: 40px;
}
.word-group:first-child {
margin-top: 0;
}
.word-header {
display: flex;
align-items: center;
justify-content: space-between;
margin-bottom: 20px;
padding-bottom: 8px;
border-bottom: 1px solid #f0f0f0;
}
.word-info {
display: flex;
align-items: center;
gap: 12px;
}
.word-name {
font-size: 17px;
font-weight: 600;
color: #1a1a1a;
}
.word-count {
color: #666;
font-size: 13px;
font-weight: 500;
}
.word-count.hot {
color: #dc2626;
font-weight: 600;
}
.word-count.warm {
color: #ea580c;
font-weight: 600;
}
.word-index {
color: #999;
font-size: 12px;
}
.news-item {
margin-bottom: 20px;
padding: 16px 0;
border-bottom: 1px solid #f5f5f5;
position: relative;
display: flex;
gap: 12px;
align-items: center;
}
.news-item:last-child {
border-bottom: none;
}
.news-number {
color: #999;
font-size: 13px;
font-weight: 600;
min-width: 20px;
text-align: center;
flex-shrink: 0;
background: #f8f9fa;
border-radius: 50%;
width: 24px;
height: 24px;
display: flex;
align-items: center;
justify-content: center;
align-self: flex-start;
margin-top: 8px;
}
.news-content {
flex: 1;
min-width: 0;
}
.news-header {
display: flex;
align-items: center;
gap: 8px;
margin-bottom: 8px;
flex-wrap: wrap;
}
.source-name {
color: #666;
font-size: 12px;
font-weight: 500;
}
.rank-num {
color: #fff;
background: #6b7280;
font-size: 10px;
font-weight: 700;
padding: 2px 6px;
border-radius: 10px;
min-width: 18px;
text-align: center;
}
.rank-num.top {
background: #dc2626;
}
.rank-num.high {
background: #ea580c;
}
.time-info {
color: #999;
font-size: 11px;
}
.count-info {
color: #059669;
font-size: 11px;
font-weight: 500;
}
.news-title {
font-size: 15px;
line-height: 1.4;
color: #1a1a1a;
margin: 0;
}
.news-link {
color: #2563eb;
text-decoration: none;
}
.news-link:hover {
text-decoration: underline;
}
.news-link:visited {
color: #7c3aed;
}
.footer {
margin-top: 32px;
padding: 20px 24px;
background: #f8f9fa;
border-top: 1px solid #e5e7eb;
text-align: center;
}
.footer-content {
font-size: 13px;
color: #6b7280;
line-height: 1.6;
}
.footer-link {
color: #4f46e5;
text-decoration: none;
font-weight: 500;
transition: color 0.2s ease;
}
.footer-link:hover {
color: #7c3aed;
text-decoration: underline;
}
.project-name {
font-weight: 600;
color: #374151;
}
@media (max-width: 480px) {
body {
padding: 12px;
}
.header {
padding: 24px 20px;
}
.content {
padding: 20px;
}
.footer {
padding: 16px 20px;
}
.header-info {
grid-template-columns: 1fr;
gap: 12px;
}
.news-header {
gap: 6px;
}
.news-item {
gap: 8px;
}
.news-number {
width: 20px;
height: 20px;
font-size: 12px;
}
.save-buttons {
position: static;
margin-bottom: 16px;
display: flex;
gap: 8px;
justify-content: center;
flex-direction: column;
width: 100%;
}
.save-btn {
width: 100%;
}
}
</style>
</head>
<body>
<div class="container">
<div class="header">
<div class="save-buttons">
<button class="save-btn" onclick="saveAsImage()">保存为图片</button>
<button class="save-btn" onclick="saveAsMultipleImages()">分段保存</button>
</div>
<div class="header-title">热点新闻分析</div>
<div class="header-info">
<div class="info-item">
<span class="info-label">报告类型</span>
<span class="info-value">当日汇总</span>
</div>
<div class="info-item">
<span class="info-label">新闻总数</span>
<span class="info-value">387 条</span>
</div>
<div class="info-item">
<span class="info-label">热点新闻</span>
<span class="info-value">5 条</span>
</div>
<div class="info-item">
<span class="info-label">生成时间</span>
<span class="info-value">06-16 07:17</span>
</div>
</div>
</div>
<div class="content">
<div class="word-group">
<div class="word-header">
<div class="word-info">
<div class="word-name">ai 人工智能</div>
<div class="word-count hot">3 条</div>
</div>
<div class="word-index">1/4</div>
</div>
<div class="news-item">
<div class="news-number">1</div>
<div class="news-content">
<div class="news-header">
<span class="source-name">财联社热门</span>
<span class="rank-num high">7-8</span>
<span class="time-info">00:23~07:17</span>
<span class="count-info">15次</span>
</div>
<div class="news-title">
<a href="https://www.cls.cn/detail/2057563" target="_blank" class="news-link">上市首日暴涨140% 军用无人机公司登陆纽交所 AI打造产品核心竞争力</a>
</div>
</div>
</div>
<div class="news-item">
<div class="news-number">2</div>
<div class="news-content">
<div class="news-header">
<span class="source-name">tieba</span>
<span class="rank-num">18-19</span>
<span class="time-info">00:23~07:17</span>
<span class="count-info">15次</span>
</div>
<div class="news-title">
<a href="https://tieba.baidu.com/hottopic/browse/hottopic?topic_id=28342819&topic_name=%E4%BC%8A%E6%9C%97%E7%96%91%E7%94%A8AI%E4%BC%AA%E9%80%A0%E4%BB%A5%E5%86%9BF35%E6%AE%8B%E9%AA%B8%E5%9B%BE" target="_blank" class="news-link">伊朗疑用AI伪造以军F35残骸图</a>
</div>
</div>
</div>
<div class="news-item">
<div class="news-number">3</div>
<div class="news-content">
<div class="news-header">
<span class="source-name">zhihu</span>
<span class="rank-num top">5-13</span>
<span class="time-info">00:23~07:17</span>
<span class="count-info">15次</span>
</div>
<div class="news-title">
<a href="https://www.zhihu.com/question/596907281" target="_blank" class="news-link">罗杰·彭罗斯说无论意识是什么,都绝对不是一种计算。意思是:任何 AI 都不可能产生意识?</a>
</div>
</div>
</div>
</div>
<div class="word-group">
<div class="word-header">
<div class="word-info">
<div class="word-name">DeepSeek 梁文锋</div>
<div class="word-count">1 条</div>
</div>
<div class="word-index">2/4</div>
</div>
<div class="news-item">
<div class="news-number">1</div>
<div class="news-content">
<div class="news-header">
<span class="source-name">华尔街见闻</span>
<span class="rank-num high">8-9</span>
<span class="time-info">00:23~07:17</span>
<span class="count-info">15次</span>
</div>
<div class="news-title">
<a href="https://wallstreetcn.com/articles/3749141" target="_blank" class="news-link">恒生生科指数1月以来涨超60%,中国创新药的"DeepSeek时刻"超过了AI</a>
</div>
</div>
</div>
</div>
<div class="word-group">
<div class="word-header">
<div class="word-info">
<div class="word-name">哪吒 饺子</div>
<div class="word-count">1 条</div>
</div>
<div class="word-index">3/4</div>
</div>
<div class="news-item">
<div class="news-number">1</div>
<div class="news-content">
<div class="news-header">
<span class="source-name">百度热搜</span>
<span class="rank-num">24-30</span>
<span class="time-info">00:57~06:55</span>
<span class="count-info">7次</span>
</div>
<div class="news-title">
<a href="https://www.baidu.com/s?wd=%E3%80%8A%E5%93%AA%E5%90%922%E3%80%8B%E7%89%87%E6%96%B9%E6%88%96%E5%88%86%E8%B4%A652%E4%BA%BF%E5%85%83" target="_blank" class="news-link">《哪吒2》片方或分账52亿元</a>
</div>
</div>
</div>
</div>
<div class="word-group">
<div class="word-header">
<div class="word-info">
<div class="word-name">米哈游 原神 星穹铁道</div>
<div class="word-count">1 条</div>
</div>
<div class="word-index">4/4</div>
</div>
<div class="news-item">
<div class="news-number">1</div>
<div class="news-content">
<div class="news-header">
<span class="source-name">zhihu</span>
<span class="rank-num top">5</span>
<span class="time-info">06:55~07:17</span>
<span class="count-info">2次</span>
</div>
<div class="news-title">
<a href="https://www.zhihu.com/question/1905395386765537540" target="_blank" class="news-link">目前原神所有自机角色谁最有可能出新形态?</a>
</div>
</div>
</div>
</div>
</div>
<div class="footer">
<div class="footer-content">
由 <span class="project-name">TrendRadar</span> 生成 ·
<a href="https://github.com/sansan0/TrendRadar" target="_blank" class="footer-link">
GitHub 开源项目
</a>
</div>
</div>
</div>
<script>
async function saveAsImage() {
const button = event.target;
const originalText = button.textContent;
try {
button.textContent = '生成中...';
button.disabled = true;
window.scrollTo(0, 0);
await new Promise(resolve => setTimeout(resolve, 200));
const buttons = document.querySelector('.save-buttons');
buttons.style.visibility = 'hidden';
await new Promise(resolve => setTimeout(resolve, 100));
const container = document.querySelector('.container');
const canvas = await html2canvas(container, {
backgroundColor: '#ffffff',
scale: 1.5,
useCORS: true,
allowTaint: false,
imageTimeout: 10000,
removeContainer: false,
foreignObjectRendering: false,
logging: false,
width: container.offsetWidth,
height: container.offsetHeight,
x: 0,
y: 0,
scrollX: 0,
scrollY: 0,
windowWidth: window.innerWidth,
windowHeight: window.innerHeight
});
buttons.style.visibility = 'visible';
const link = document.createElement('a');
const now = new Date();
const filename = `TrendRadar_热点新闻分析_${now.getFullYear()}${String(now.getMonth() + 1).padStart(2, '0')}${String(now.getDate()).padStart(2, '0')}_${String(now.getHours()).padStart(2, '0')}${String(now.getMinutes()).padStart(2, '0')}.png`;
link.download = filename;
link.href = canvas.toDataURL('image/png', 1.0);
document.body.appendChild(link);
link.click();
document.body.removeChild(link);
button.textContent = '保存成功!';
setTimeout(() => {
button.textContent = originalText;
button.disabled = false;
}, 2000);
} catch (error) {
const buttons = document.querySelector('.save-buttons');
buttons.style.visibility = 'visible';
button.textContent = '保存失败';
setTimeout(() => {
button.textContent = originalText;
button.disabled = false;
}, 2000);
}
}
async function saveAsMultipleImages() {
const button = event.target;
const originalText = button.textContent;
const container = document.querySelector('.container');
const scale = 1.5;
const maxHeight = 5000 / scale;
try {
button.textContent = '分析中...';
button.disabled = true;
const wordGroups = Array.from(container.querySelectorAll('.word-group'));
const header = container.querySelector('.header');
const footer = container.querySelector('.footer');
const containerRect = container.getBoundingClientRect();
const elements = [];
elements.push({
type: 'header',
element: header,
top: 0,
bottom: header.offsetHeight,
height: header.offsetHeight
});
wordGroups.forEach(group => {
const groupRect = group.getBoundingClientRect();
const wordHeader = group.querySelector('.word-header');
if (wordHeader) {
const headerRect = wordHeader.getBoundingClientRect();
elements.push({
type: 'word-header',
top: groupRect.top - containerRect.top,
bottom: headerRect.bottom - containerRect.top,
height: headerRect.height
});
}
group.querySelectorAll('.news-item').forEach(item => {
const rect = item.getBoundingClientRect();
elements.push({
type: 'news-item',
top: rect.top - containerRect.top,
bottom: rect.bottom - containerRect.top,
height: rect.height
});
});
});
const footerRect = footer.getBoundingClientRect();
elements.push({
type: 'footer',
top: footerRect.top - containerRect.top,
bottom: footerRect.bottom - containerRect.top,
height: footer.offsetHeight
});
const segments = [];
let currentSegment = { start: 0, end: 0, height: 0 };
let headerHeight = header.offsetHeight;
currentSegment.height = headerHeight;
for (let i = 1; i < elements.length; i++) {
const element = elements[i];
const potentialHeight = element.bottom - currentSegment.start;
if (potentialHeight > maxHeight && currentSegment.height > headerHeight) {
currentSegment.end = elements[i - 1].bottom;
segments.push(currentSegment);
currentSegment = {
start: currentSegment.end,
end: 0,
height: element.bottom - currentSegment.end
};
} else {
currentSegment.height = potentialHeight;
currentSegment.end = element.bottom;
}
}
if (currentSegment.height > 0) {
currentSegment.end = container.offsetHeight;
segments.push(currentSegment);
}
button.textContent = `生成中 (0/${segments.length})...`;
const buttons = document.querySelector('.save-buttons');
buttons.style.visibility = 'hidden';
const images = [];
for (let i = 0; i < segments.length; i++) {
const segment = segments[i];
button.textContent = `生成中 (${i + 1}/${segments.length})...`;
const tempContainer = document.createElement('div');
tempContainer.style.cssText = `
position: absolute;
left: -9999px;
top: 0;
width: ${container.offsetWidth}px;
background: white;
`;
const clonedContainer = container.cloneNode(true);
const clonedButtons = clonedContainer.querySelector('.save-buttons');
if (clonedButtons) {
clonedButtons.style.display = 'none';
}
tempContainer.appendChild(clonedContainer);
document.body.appendChild(tempContainer);
await new Promise(resolve => setTimeout(resolve, 100));
const canvas = await html2canvas(clonedContainer, {
backgroundColor: '#ffffff',
scale: scale,
useCORS: true,
allowTaint: false,
imageTimeout: 10000,
logging: false,
width: container.offsetWidth,
height: segment.end - segment.start,
x: 0,
y: segment.start,
windowWidth: window.innerWidth,
windowHeight: window.innerHeight
});
images.push(canvas.toDataURL('image/png', 1.0));
document.body.removeChild(tempContainer);
}
buttons.style.visibility = 'visible';
const now = new Date();
const baseFilename = `TrendRadar_热点新闻分析_${now.getFullYear()}${String(now.getMonth() + 1).padStart(2, '0')}${String(now.getDate()).padStart(2, '0')}_${String(now.getHours()).padStart(2, '0')}${String(now.getMinutes()).padStart(2, '0')}`;
for (let i = 0; i < images.length; i++) {
const link = document.createElement('a');
link.download = `${baseFilename}_part${i + 1}.png`;
link.href = images[i];
document.body.appendChild(link);
link.click();
document.body.removeChild(link);
await new Promise(resolve => setTimeout(resolve, 100));
}
button.textContent = `已保存 ${segments.length} 张图片!`;
setTimeout(() => {
button.textContent = originalText;
button.disabled = false;
}, 2000);
} catch (error) {
console.error('分段保存失败:', error);
const buttons = document.querySelector('.save-buttons');
buttons.style.visibility = 'visible';
button.textContent = '保存失败';
setTimeout(() => {
button.textContent = originalText;
button.disabled = false;
}, 2000);
}
}
document.addEventListener('DOMContentLoaded', function() {
window.scrollTo(0, 0);
});
</script>
</body>
</html>
|
2302_81331056/TrendRadar
|
index.html
|
HTML
|
agpl-3.0
| 26,844
|
# coding=utf-8
import json
import os
import random
import re
import time
import webbrowser
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
from email.utils import formataddr, formatdate, make_msgid
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Tuple, Optional, Union
import pytz
import requests
import yaml
VERSION = "3.3.0"
# === SMTP邮件配置 ===
SMTP_CONFIGS = {
# Gmail(使用 STARTTLS)
"gmail.com": {"server": "smtp.gmail.com", "port": 587, "encryption": "TLS"},
# QQ邮箱(使用 SSL,更稳定)
"qq.com": {"server": "smtp.qq.com", "port": 465, "encryption": "SSL"},
# Outlook(使用 STARTTLS)
"outlook.com": {
"server": "smtp-mail.outlook.com",
"port": 587,
"encryption": "TLS",
},
"hotmail.com": {
"server": "smtp-mail.outlook.com",
"port": 587,
"encryption": "TLS",
},
"live.com": {"server": "smtp-mail.outlook.com", "port": 587, "encryption": "TLS"},
# 网易邮箱(使用 SSL,更稳定)
"163.com": {"server": "smtp.163.com", "port": 465, "encryption": "SSL"},
"126.com": {"server": "smtp.126.com", "port": 465, "encryption": "SSL"},
# 新浪邮箱(使用 SSL)
"sina.com": {"server": "smtp.sina.com", "port": 465, "encryption": "SSL"},
# 搜狐邮箱(使用 SSL)
"sohu.com": {"server": "smtp.sohu.com", "port": 465, "encryption": "SSL"},
# 天翼邮箱(使用 SSL)
"189.cn": {"server": "smtp.189.cn", "port": 465, "encryption": "SSL"},
# 阿里云邮箱(使用 TLS)
"aliyun.com": {"server": "smtp.aliyun.com", "port": 465, "encryption": "TLS"},
}
# === 配置管理 ===
def load_config():
"""加载配置文件"""
config_path = os.environ.get("CONFIG_PATH", "config/config.yaml")
if not Path(config_path).exists():
raise FileNotFoundError(f"配置文件 {config_path} 不存在")
with open(config_path, "r", encoding="utf-8") as f:
config_data = yaml.safe_load(f)
print(f"配置文件加载成功: {config_path}")
# 构建配置
config = {
"VERSION_CHECK_URL": config_data["app"]["version_check_url"],
"SHOW_VERSION_UPDATE": config_data["app"]["show_version_update"],
"REQUEST_INTERVAL": config_data["crawler"]["request_interval"],
"REPORT_MODE": os.environ.get("REPORT_MODE", "").strip()
or config_data["report"]["mode"],
"RANK_THRESHOLD": config_data["report"]["rank_threshold"],
"SORT_BY_POSITION_FIRST": os.environ.get("SORT_BY_POSITION_FIRST", "").strip().lower()
in ("true", "1")
if os.environ.get("SORT_BY_POSITION_FIRST", "").strip()
else config_data["report"].get("sort_by_position_first", False),
"MAX_NEWS_PER_KEYWORD": int(
os.environ.get("MAX_NEWS_PER_KEYWORD", "").strip() or "0"
)
or config_data["report"].get("max_news_per_keyword", 0),
"USE_PROXY": config_data["crawler"]["use_proxy"],
"DEFAULT_PROXY": config_data["crawler"]["default_proxy"],
"ENABLE_CRAWLER": os.environ.get("ENABLE_CRAWLER", "").strip().lower()
in ("true", "1")
if os.environ.get("ENABLE_CRAWLER", "").strip()
else config_data["crawler"]["enable_crawler"],
"ENABLE_NOTIFICATION": os.environ.get("ENABLE_NOTIFICATION", "").strip().lower()
in ("true", "1")
if os.environ.get("ENABLE_NOTIFICATION", "").strip()
else config_data["notification"]["enable_notification"],
"MESSAGE_BATCH_SIZE": config_data["notification"]["message_batch_size"],
"DINGTALK_BATCH_SIZE": config_data["notification"].get(
"dingtalk_batch_size", 20000
),
"FEISHU_BATCH_SIZE": config_data["notification"].get("feishu_batch_size", 29000),
"BARK_BATCH_SIZE": config_data["notification"].get("bark_batch_size", 3600),
"BATCH_SEND_INTERVAL": config_data["notification"]["batch_send_interval"],
"FEISHU_MESSAGE_SEPARATOR": config_data["notification"][
"feishu_message_separator"
],
"PUSH_WINDOW": {
"ENABLED": os.environ.get("PUSH_WINDOW_ENABLED", "").strip().lower()
in ("true", "1")
if os.environ.get("PUSH_WINDOW_ENABLED", "").strip()
else config_data["notification"]
.get("push_window", {})
.get("enabled", False),
"TIME_RANGE": {
"START": os.environ.get("PUSH_WINDOW_START", "").strip()
or config_data["notification"]
.get("push_window", {})
.get("time_range", {})
.get("start", "08:00"),
"END": os.environ.get("PUSH_WINDOW_END", "").strip()
or config_data["notification"]
.get("push_window", {})
.get("time_range", {})
.get("end", "22:00"),
},
"ONCE_PER_DAY": os.environ.get("PUSH_WINDOW_ONCE_PER_DAY", "").strip().lower()
in ("true", "1")
if os.environ.get("PUSH_WINDOW_ONCE_PER_DAY", "").strip()
else config_data["notification"]
.get("push_window", {})
.get("once_per_day", True),
"RECORD_RETENTION_DAYS": int(
os.environ.get("PUSH_WINDOW_RETENTION_DAYS", "").strip() or "0"
)
or config_data["notification"]
.get("push_window", {})
.get("push_record_retention_days", 7),
},
"WEIGHT_CONFIG": {
"RANK_WEIGHT": config_data["weight"]["rank_weight"],
"FREQUENCY_WEIGHT": config_data["weight"]["frequency_weight"],
"HOTNESS_WEIGHT": config_data["weight"]["hotness_weight"],
},
"PLATFORMS": config_data["platforms"],
}
# 通知渠道配置(环境变量优先)
notification = config_data.get("notification", {})
webhooks = notification.get("webhooks", {})
config["FEISHU_WEBHOOK_URL"] = os.environ.get(
"FEISHU_WEBHOOK_URL", ""
).strip() or webhooks.get("feishu_url", "")
config["DINGTALK_WEBHOOK_URL"] = os.environ.get(
"DINGTALK_WEBHOOK_URL", ""
).strip() or webhooks.get("dingtalk_url", "")
config["WEWORK_WEBHOOK_URL"] = os.environ.get(
"WEWORK_WEBHOOK_URL", ""
).strip() or webhooks.get("wework_url", "")
config["WEWORK_MSG_TYPE"] = os.environ.get(
"WEWORK_MSG_TYPE", ""
).strip() or webhooks.get("wework_msg_type", "markdown")
config["TELEGRAM_BOT_TOKEN"] = os.environ.get(
"TELEGRAM_BOT_TOKEN", ""
).strip() or webhooks.get("telegram_bot_token", "")
config["TELEGRAM_CHAT_ID"] = os.environ.get(
"TELEGRAM_CHAT_ID", ""
).strip() or webhooks.get("telegram_chat_id", "")
# 邮件配置
config["EMAIL_FROM"] = os.environ.get("EMAIL_FROM", "").strip() or webhooks.get(
"email_from", ""
)
config["EMAIL_PASSWORD"] = os.environ.get(
"EMAIL_PASSWORD", ""
).strip() or webhooks.get("email_password", "")
config["EMAIL_TO"] = os.environ.get("EMAIL_TO", "").strip() or webhooks.get(
"email_to", ""
)
config["EMAIL_SMTP_SERVER"] = os.environ.get(
"EMAIL_SMTP_SERVER", ""
).strip() or webhooks.get("email_smtp_server", "")
config["EMAIL_SMTP_PORT"] = os.environ.get(
"EMAIL_SMTP_PORT", ""
).strip() or webhooks.get("email_smtp_port", "")
# ntfy配置
config["NTFY_SERVER_URL"] = (
os.environ.get("NTFY_SERVER_URL", "").strip()
or webhooks.get("ntfy_server_url")
or "https://ntfy.sh"
)
config["NTFY_TOPIC"] = os.environ.get("NTFY_TOPIC", "").strip() or webhooks.get(
"ntfy_topic", ""
)
config["NTFY_TOKEN"] = os.environ.get("NTFY_TOKEN", "").strip() or webhooks.get(
"ntfy_token", ""
)
# Bark配置
config["BARK_URL"] = os.environ.get("BARK_URL", "").strip() or webhooks.get(
"bark_url", ""
)
# 输出配置来源信息
notification_sources = []
if config["FEISHU_WEBHOOK_URL"]:
source = "环境变量" if os.environ.get("FEISHU_WEBHOOK_URL") else "配置文件"
notification_sources.append(f"飞书({source})")
if config["DINGTALK_WEBHOOK_URL"]:
source = "环境变量" if os.environ.get("DINGTALK_WEBHOOK_URL") else "配置文件"
notification_sources.append(f"钉钉({source})")
if config["WEWORK_WEBHOOK_URL"]:
source = "环境变量" if os.environ.get("WEWORK_WEBHOOK_URL") else "配置文件"
notification_sources.append(f"企业微信({source})")
if config["TELEGRAM_BOT_TOKEN"] and config["TELEGRAM_CHAT_ID"]:
token_source = (
"环境变量" if os.environ.get("TELEGRAM_BOT_TOKEN") else "配置文件"
)
chat_source = "环境变量" if os.environ.get("TELEGRAM_CHAT_ID") else "配置文件"
notification_sources.append(f"Telegram({token_source}/{chat_source})")
if config["EMAIL_FROM"] and config["EMAIL_PASSWORD"] and config["EMAIL_TO"]:
from_source = "环境变量" if os.environ.get("EMAIL_FROM") else "配置文件"
notification_sources.append(f"邮件({from_source})")
if config["NTFY_SERVER_URL"] and config["NTFY_TOPIC"]:
server_source = "环境变量" if os.environ.get("NTFY_SERVER_URL") else "配置文件"
notification_sources.append(f"ntfy({server_source})")
if config["BARK_URL"]:
bark_source = "环境变量" if os.environ.get("BARK_URL") else "配置文件"
notification_sources.append(f"Bark({bark_source})")
if notification_sources:
print(f"通知渠道配置来源: {', '.join(notification_sources)}")
else:
print("未配置任何通知渠道")
return config
print("正在加载配置...")
CONFIG = load_config()
print(f"TrendRadar v{VERSION} 配置加载完成")
print(f"监控平台数量: {len(CONFIG['PLATFORMS'])}")
# === 工具函数 ===
def get_beijing_time():
"""获取北京时间"""
return datetime.now(pytz.timezone("Asia/Shanghai"))
def format_date_folder():
"""格式化日期文件夹"""
return get_beijing_time().strftime("%Y年%m月%d日")
def format_time_filename():
"""格式化时间文件名"""
return get_beijing_time().strftime("%H时%M分")
def clean_title(title: str) -> str:
"""清理标题中的特殊字符"""
if not isinstance(title, str):
title = str(title)
cleaned_title = title.replace("\n", " ").replace("\r", " ")
cleaned_title = re.sub(r"\s+", " ", cleaned_title)
cleaned_title = cleaned_title.strip()
return cleaned_title
def ensure_directory_exists(directory: str):
"""确保目录存在"""
Path(directory).mkdir(parents=True, exist_ok=True)
def get_output_path(subfolder: str, filename: str) -> str:
"""获取输出路径"""
date_folder = format_date_folder()
output_dir = Path("output") / date_folder / subfolder
ensure_directory_exists(str(output_dir))
return str(output_dir / filename)
def check_version_update(
current_version: str, version_url: str, proxy_url: Optional[str] = None
) -> Tuple[bool, Optional[str]]:
"""检查版本更新"""
try:
proxies = None
if proxy_url:
proxies = {"http": proxy_url, "https": proxy_url}
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36",
"Accept": "text/plain, */*",
"Cache-Control": "no-cache",
}
response = requests.get(
version_url, proxies=proxies, headers=headers, timeout=10
)
response.raise_for_status()
remote_version = response.text.strip()
print(f"当前版本: {current_version}, 远程版本: {remote_version}")
# 比较版本
def parse_version(version_str):
try:
parts = version_str.strip().split(".")
if len(parts) != 3:
raise ValueError("版本号格式不正确")
return int(parts[0]), int(parts[1]), int(parts[2])
except:
return 0, 0, 0
current_tuple = parse_version(current_version)
remote_tuple = parse_version(remote_version)
need_update = current_tuple < remote_tuple
return need_update, remote_version if need_update else None
except Exception as e:
print(f"版本检查失败: {e}")
return False, None
def is_first_crawl_today() -> bool:
"""检测是否是当天第一次爬取"""
date_folder = format_date_folder()
txt_dir = Path("output") / date_folder / "txt"
if not txt_dir.exists():
return True
files = sorted([f for f in txt_dir.iterdir() if f.suffix == ".txt"])
return len(files) <= 1
def html_escape(text: str) -> str:
"""HTML转义"""
if not isinstance(text, str):
text = str(text)
return (
text.replace("&", "&")
.replace("<", "<")
.replace(">", ">")
.replace('"', """)
.replace("'", "'")
)
# === 推送记录管理 ===
class PushRecordManager:
"""推送记录管理器"""
def __init__(self):
self.record_dir = Path("output") / ".push_records"
self.ensure_record_dir()
self.cleanup_old_records()
def ensure_record_dir(self):
"""确保记录目录存在"""
self.record_dir.mkdir(parents=True, exist_ok=True)
def get_today_record_file(self) -> Path:
"""获取今天的记录文件路径"""
today = get_beijing_time().strftime("%Y%m%d")
return self.record_dir / f"push_record_{today}.json"
def cleanup_old_records(self):
"""清理过期的推送记录"""
retention_days = CONFIG["PUSH_WINDOW"]["RECORD_RETENTION_DAYS"]
current_time = get_beijing_time()
for record_file in self.record_dir.glob("push_record_*.json"):
try:
date_str = record_file.stem.replace("push_record_", "")
file_date = datetime.strptime(date_str, "%Y%m%d")
file_date = pytz.timezone("Asia/Shanghai").localize(file_date)
if (current_time - file_date).days > retention_days:
record_file.unlink()
print(f"清理过期推送记录: {record_file.name}")
except Exception as e:
print(f"清理记录文件失败 {record_file}: {e}")
def has_pushed_today(self) -> bool:
"""检查今天是否已经推送过"""
record_file = self.get_today_record_file()
if not record_file.exists():
return False
try:
with open(record_file, "r", encoding="utf-8") as f:
record = json.load(f)
return record.get("pushed", False)
except Exception as e:
print(f"读取推送记录失败: {e}")
return False
def record_push(self, report_type: str):
"""记录推送"""
record_file = self.get_today_record_file()
now = get_beijing_time()
record = {
"pushed": True,
"push_time": now.strftime("%Y-%m-%d %H:%M:%S"),
"report_type": report_type,
}
try:
with open(record_file, "w", encoding="utf-8") as f:
json.dump(record, f, ensure_ascii=False, indent=2)
print(f"推送记录已保存: {report_type} at {now.strftime('%H:%M:%S')}")
except Exception as e:
print(f"保存推送记录失败: {e}")
def is_in_time_range(self, start_time: str, end_time: str) -> bool:
"""检查当前时间是否在指定时间范围内"""
now = get_beijing_time()
current_time = now.strftime("%H:%M")
def normalize_time(time_str: str) -> str:
"""将时间字符串标准化为 HH:MM 格式"""
try:
parts = time_str.strip().split(":")
if len(parts) != 2:
raise ValueError(f"时间格式错误: {time_str}")
hour = int(parts[0])
minute = int(parts[1])
if not (0 <= hour <= 23 and 0 <= minute <= 59):
raise ValueError(f"时间范围错误: {time_str}")
return f"{hour:02d}:{minute:02d}"
except Exception as e:
print(f"时间格式化错误 '{time_str}': {e}")
return time_str
normalized_start = normalize_time(start_time)
normalized_end = normalize_time(end_time)
normalized_current = normalize_time(current_time)
result = normalized_start <= normalized_current <= normalized_end
if not result:
print(f"时间窗口判断:当前 {normalized_current},窗口 {normalized_start}-{normalized_end}")
return result
# === 数据获取 ===
class DataFetcher:
"""数据获取器"""
def __init__(self, proxy_url: Optional[str] = None):
self.proxy_url = proxy_url
def fetch_data(
self,
id_info: Union[str, Tuple[str, str]],
max_retries: int = 2,
min_retry_wait: int = 3,
max_retry_wait: int = 5,
) -> Tuple[Optional[str], str, str]:
"""获取指定ID数据,支持重试"""
if isinstance(id_info, tuple):
id_value, alias = id_info
else:
id_value = id_info
alias = id_value
url = f"https://newsnow.busiyi.world/api/s?id={id_value}&latest"
proxies = None
if self.proxy_url:
proxies = {"http": self.proxy_url, "https": self.proxy_url}
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
"Accept": "application/json, text/plain, */*",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Connection": "keep-alive",
"Cache-Control": "no-cache",
}
retries = 0
while retries <= max_retries:
try:
response = requests.get(
url, proxies=proxies, headers=headers, timeout=10
)
response.raise_for_status()
data_text = response.text
data_json = json.loads(data_text)
status = data_json.get("status", "未知")
if status not in ["success", "cache"]:
raise ValueError(f"响应状态异常: {status}")
status_info = "最新数据" if status == "success" else "缓存数据"
print(f"获取 {id_value} 成功({status_info})")
return data_text, id_value, alias
except Exception as e:
retries += 1
if retries <= max_retries:
base_wait = random.uniform(min_retry_wait, max_retry_wait)
additional_wait = (retries - 1) * random.uniform(1, 2)
wait_time = base_wait + additional_wait
print(f"请求 {id_value} 失败: {e}. {wait_time:.2f}秒后重试...")
time.sleep(wait_time)
else:
print(f"请求 {id_value} 失败: {e}")
return None, id_value, alias
return None, id_value, alias
def crawl_websites(
self,
ids_list: List[Union[str, Tuple[str, str]]],
request_interval: int = CONFIG["REQUEST_INTERVAL"],
) -> Tuple[Dict, Dict, List]:
"""爬取多个网站数据"""
results = {}
id_to_name = {}
failed_ids = []
for i, id_info in enumerate(ids_list):
if isinstance(id_info, tuple):
id_value, name = id_info
else:
id_value = id_info
name = id_value
id_to_name[id_value] = name
response, _, _ = self.fetch_data(id_info)
if response:
try:
data = json.loads(response)
results[id_value] = {}
for index, item in enumerate(data.get("items", []), 1):
title = item.get("title")
# 跳过无效标题(None、float、空字符串)
if title is None or isinstance(title, float) or not str(title).strip():
continue
title = str(title).strip()
url = item.get("url", "")
mobile_url = item.get("mobileUrl", "")
if title in results[id_value]:
results[id_value][title]["ranks"].append(index)
else:
results[id_value][title] = {
"ranks": [index],
"url": url,
"mobileUrl": mobile_url,
}
except json.JSONDecodeError:
print(f"解析 {id_value} 响应失败")
failed_ids.append(id_value)
except Exception as e:
print(f"处理 {id_value} 数据出错: {e}")
failed_ids.append(id_value)
else:
failed_ids.append(id_value)
if i < len(ids_list) - 1:
actual_interval = request_interval + random.randint(-10, 20)
actual_interval = max(50, actual_interval)
time.sleep(actual_interval / 1000)
print(f"成功: {list(results.keys())}, 失败: {failed_ids}")
return results, id_to_name, failed_ids
# === 数据处理 ===
def save_titles_to_file(results: Dict, id_to_name: Dict, failed_ids: List) -> str:
"""保存标题到文件"""
file_path = get_output_path("txt", f"{format_time_filename()}.txt")
with open(file_path, "w", encoding="utf-8") as f:
for id_value, title_data in results.items():
# id | name 或 id
name = id_to_name.get(id_value)
if name and name != id_value:
f.write(f"{id_value} | {name}\n")
else:
f.write(f"{id_value}\n")
# 按排名排序标题
sorted_titles = []
for title, info in title_data.items():
cleaned_title = clean_title(title)
if isinstance(info, dict):
ranks = info.get("ranks", [])
url = info.get("url", "")
mobile_url = info.get("mobileUrl", "")
else:
ranks = info if isinstance(info, list) else []
url = ""
mobile_url = ""
rank = ranks[0] if ranks else 1
sorted_titles.append((rank, cleaned_title, url, mobile_url))
sorted_titles.sort(key=lambda x: x[0])
for rank, cleaned_title, url, mobile_url in sorted_titles:
line = f"{rank}. {cleaned_title}"
if url:
line += f" [URL:{url}]"
if mobile_url:
line += f" [MOBILE:{mobile_url}]"
f.write(line + "\n")
f.write("\n")
if failed_ids:
f.write("==== 以下ID请求失败 ====\n")
for id_value in failed_ids:
f.write(f"{id_value}\n")
return file_path
def load_frequency_words(
frequency_file: Optional[str] = None,
) -> Tuple[List[Dict], List[str]]:
"""加载频率词配置"""
if frequency_file is None:
frequency_file = os.environ.get(
"FREQUENCY_WORDS_PATH", "config/frequency_words.txt"
)
frequency_path = Path(frequency_file)
if not frequency_path.exists():
raise FileNotFoundError(f"频率词文件 {frequency_file} 不存在")
with open(frequency_path, "r", encoding="utf-8") as f:
content = f.read()
word_groups = [group.strip() for group in content.split("\n\n") if group.strip()]
processed_groups = []
filter_words = []
for group in word_groups:
words = [word.strip() for word in group.split("\n") if word.strip()]
group_required_words = []
group_normal_words = []
group_filter_words = []
group_max_count = 0 # 默认不限制
for word in words:
if word.startswith("@"):
# 解析最大显示数量(只接受正整数)
try:
count = int(word[1:])
if count > 0:
group_max_count = count
except (ValueError, IndexError):
pass # 忽略无效的@数字格式
elif word.startswith("!"):
filter_words.append(word[1:])
group_filter_words.append(word[1:])
elif word.startswith("+"):
group_required_words.append(word[1:])
else:
group_normal_words.append(word)
if group_required_words or group_normal_words:
if group_normal_words:
group_key = " ".join(group_normal_words)
else:
group_key = " ".join(group_required_words)
processed_groups.append(
{
"required": group_required_words,
"normal": group_normal_words,
"group_key": group_key,
"max_count": group_max_count, # 新增字段
}
)
return processed_groups, filter_words
def parse_file_titles(file_path: Path) -> Tuple[Dict, Dict]:
"""解析单个txt文件的标题数据,返回(titles_by_id, id_to_name)"""
titles_by_id = {}
id_to_name = {}
with open(file_path, "r", encoding="utf-8") as f:
content = f.read()
sections = content.split("\n\n")
for section in sections:
if not section.strip() or "==== 以下ID请求失败 ====" in section:
continue
lines = section.strip().split("\n")
if len(lines) < 2:
continue
# id | name 或 id
header_line = lines[0].strip()
if " | " in header_line:
parts = header_line.split(" | ", 1)
source_id = parts[0].strip()
name = parts[1].strip()
id_to_name[source_id] = name
else:
source_id = header_line
id_to_name[source_id] = source_id
titles_by_id[source_id] = {}
for line in lines[1:]:
if line.strip():
try:
title_part = line.strip()
rank = None
# 提取排名
if ". " in title_part and title_part.split(". ")[0].isdigit():
rank_str, title_part = title_part.split(". ", 1)
rank = int(rank_str)
# 提取 MOBILE URL
mobile_url = ""
if " [MOBILE:" in title_part:
title_part, mobile_part = title_part.rsplit(" [MOBILE:", 1)
if mobile_part.endswith("]"):
mobile_url = mobile_part[:-1]
# 提取 URL
url = ""
if " [URL:" in title_part:
title_part, url_part = title_part.rsplit(" [URL:", 1)
if url_part.endswith("]"):
url = url_part[:-1]
title = clean_title(title_part.strip())
ranks = [rank] if rank is not None else [1]
titles_by_id[source_id][title] = {
"ranks": ranks,
"url": url,
"mobileUrl": mobile_url,
}
except Exception as e:
print(f"解析标题行出错: {line}, 错误: {e}")
return titles_by_id, id_to_name
def read_all_today_titles(
current_platform_ids: Optional[List[str]] = None,
) -> Tuple[Dict, Dict, Dict]:
"""读取当天所有标题文件,支持按当前监控平台过滤"""
date_folder = format_date_folder()
txt_dir = Path("output") / date_folder / "txt"
if not txt_dir.exists():
return {}, {}, {}
all_results = {}
final_id_to_name = {}
title_info = {}
files = sorted([f for f in txt_dir.iterdir() if f.suffix == ".txt"])
for file_path in files:
time_info = file_path.stem
titles_by_id, file_id_to_name = parse_file_titles(file_path)
if current_platform_ids is not None:
filtered_titles_by_id = {}
filtered_id_to_name = {}
for source_id, title_data in titles_by_id.items():
if source_id in current_platform_ids:
filtered_titles_by_id[source_id] = title_data
if source_id in file_id_to_name:
filtered_id_to_name[source_id] = file_id_to_name[source_id]
titles_by_id = filtered_titles_by_id
file_id_to_name = filtered_id_to_name
final_id_to_name.update(file_id_to_name)
for source_id, title_data in titles_by_id.items():
process_source_data(
source_id, title_data, time_info, all_results, title_info
)
return all_results, final_id_to_name, title_info
def process_source_data(
source_id: str,
title_data: Dict,
time_info: str,
all_results: Dict,
title_info: Dict,
) -> None:
"""处理来源数据,合并重复标题"""
if source_id not in all_results:
all_results[source_id] = title_data
if source_id not in title_info:
title_info[source_id] = {}
for title, data in title_data.items():
ranks = data.get("ranks", [])
url = data.get("url", "")
mobile_url = data.get("mobileUrl", "")
title_info[source_id][title] = {
"first_time": time_info,
"last_time": time_info,
"count": 1,
"ranks": ranks,
"url": url,
"mobileUrl": mobile_url,
}
else:
for title, data in title_data.items():
ranks = data.get("ranks", [])
url = data.get("url", "")
mobile_url = data.get("mobileUrl", "")
if title not in all_results[source_id]:
all_results[source_id][title] = {
"ranks": ranks,
"url": url,
"mobileUrl": mobile_url,
}
title_info[source_id][title] = {
"first_time": time_info,
"last_time": time_info,
"count": 1,
"ranks": ranks,
"url": url,
"mobileUrl": mobile_url,
}
else:
existing_data = all_results[source_id][title]
existing_ranks = existing_data.get("ranks", [])
existing_url = existing_data.get("url", "")
existing_mobile_url = existing_data.get("mobileUrl", "")
merged_ranks = existing_ranks.copy()
for rank in ranks:
if rank not in merged_ranks:
merged_ranks.append(rank)
all_results[source_id][title] = {
"ranks": merged_ranks,
"url": existing_url or url,
"mobileUrl": existing_mobile_url or mobile_url,
}
title_info[source_id][title]["last_time"] = time_info
title_info[source_id][title]["ranks"] = merged_ranks
title_info[source_id][title]["count"] += 1
if not title_info[source_id][title].get("url"):
title_info[source_id][title]["url"] = url
if not title_info[source_id][title].get("mobileUrl"):
title_info[source_id][title]["mobileUrl"] = mobile_url
def detect_latest_new_titles(current_platform_ids: Optional[List[str]] = None) -> Dict:
"""检测当日最新批次的新增标题,支持按当前监控平台过滤"""
date_folder = format_date_folder()
txt_dir = Path("output") / date_folder / "txt"
if not txt_dir.exists():
return {}
files = sorted([f for f in txt_dir.iterdir() if f.suffix == ".txt"])
if len(files) < 2:
return {}
    # Parse the latest file
latest_file = files[-1]
latest_titles, _ = parse_file_titles(latest_file)
    # If a current platform list is given, filter the latest file's data
if current_platform_ids is not None:
filtered_latest_titles = {}
for source_id, title_data in latest_titles.items():
if source_id in current_platform_ids:
filtered_latest_titles[source_id] = title_data
latest_titles = filtered_latest_titles
    # Aggregate historical titles (filtered by platform)
historical_titles = {}
for file_path in files[:-1]:
historical_data, _ = parse_file_titles(file_path)
        # Filter the historical data
if current_platform_ids is not None:
filtered_historical_data = {}
for source_id, title_data in historical_data.items():
if source_id in current_platform_ids:
filtered_historical_data[source_id] = title_data
historical_data = filtered_historical_data
for source_id, titles_data in historical_data.items():
if source_id not in historical_titles:
historical_titles[source_id] = set()
for title in titles_data.keys():
historical_titles[source_id].add(title)
    # Identify the newly added titles
new_titles = {}
for source_id, latest_source_titles in latest_titles.items():
historical_set = historical_titles.get(source_id, set())
source_new_titles = {}
for title, title_data in latest_source_titles.items():
if title not in historical_set:
source_new_titles[title] = title_data
if source_new_titles:
new_titles[source_id] = source_new_titles
return new_titles
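# Diff semantics sketch (hypothetical): with a 09:00 batch {"A", "B"} and a
# 10:00 batch {"B", "C"}, the result is {source_id: {"C": {...}}} - only titles
# absent from every earlier batch of the day count as new. With fewer than two
# files the function returns {}.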
# === Statistics and analysis ===
def calculate_news_weight(
title_data: Dict, rank_threshold: int = CONFIG["RANK_THRESHOLD"]
) -> float:
"""计算新闻权重,用于排序"""
ranks = title_data.get("ranks", [])
if not ranks:
return 0.0
count = title_data.get("count", len(ranks))
weight_config = CONFIG["WEIGHT_CONFIG"]
    # Rank weight: Σ(11 - min(rank, 10)) / number of appearances
rank_scores = []
for rank in ranks:
score = 11 - min(rank, 10)
rank_scores.append(score)
rank_weight = sum(rank_scores) / len(ranks) if ranks else 0
    # Frequency weight: min(appearance count, 10) × 10
frequency_weight = min(count, 10) * 10
    # Hotness bonus: high-rank count / total appearances × 100
high_rank_count = sum(1 for rank in ranks if rank <= rank_threshold)
hotness_ratio = high_rank_count / len(ranks) if ranks else 0
hotness_weight = hotness_ratio * 100
total_weight = (
rank_weight * weight_config["RANK_WEIGHT"]
+ frequency_weight * weight_config["FREQUENCY_WEIGHT"]
+ hotness_weight * weight_config["HOTNESS_WEIGHT"]
)
return total_weight
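# Worked example (hypothetical inputs; the multipliers come from
# CONFIG["WEIGHT_CONFIG"]): for title_data = {"ranks": [1, 3, 12], "count": 3}
# and rank_threshold = 10:
#   rank_weight      = (10 + 8 + 1) / 3 ≈ 6.33   # 11 - min(rank, 10) per rank
#   frequency_weight = min(3, 10) * 10 = 30
#   hotness_weight   = 2 / 3 * 100 ≈ 66.67       # ranks 1 and 3 are <= threshold
# total_weight is the sum of the three parts scaled by their configured weights.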
def matches_word_groups(
title: str, word_groups: List[Dict], filter_words: List[str]
) -> bool:
"""检查标题是否匹配词组规则"""
# 防御性类型检查:确保 title 是有效字符串
if not isinstance(title, str):
title = str(title) if title is not None else ""
if not title.strip():
return False
    # With no word groups configured, match every title (supports showing all news)
if not word_groups:
return True
title_lower = title.lower()
    # Filter-word check
if any(filter_word.lower() in title_lower for filter_word in filter_words):
return False
    # Word-group matching check
for group in word_groups:
required_words = group["required"]
normal_words = group["normal"]
        # Required-words check
if required_words:
all_required_present = all(
req_word.lower() in title_lower for req_word in required_words
)
if not all_required_present:
continue
        # Normal-words check
if normal_words:
any_normal_present = any(
normal_word.lower() in title_lower for normal_word in normal_words
)
if not any_normal_present:
continue
return True
return False
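# Matching sketch (hypothetical configuration):
#   word_groups  = [{"required": ["AI"], "normal": ["芯片", "融资"]}]
#   filter_words = ["广告"]
# A title must contain "AI" AND at least one of "芯片"/"融资", and must not
# contain "广告"; comparison is case-insensitive. An empty word_groups list
# matches every title.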
def format_time_display(first_time: str, last_time: str) -> str:
"""格式化时间显示"""
if not first_time:
return ""
if first_time == last_time or not last_time:
return first_time
else:
return f"[{first_time} ~ {last_time}]"
def format_rank_display(ranks: List[int], rank_threshold: int, format_type: str) -> str:
"""统一的排名格式化方法"""
if not ranks:
return ""
unique_ranks = sorted(set(ranks))
min_rank = unique_ranks[0]
max_rank = unique_ranks[-1]
if format_type == "html":
highlight_start = "<font color='red'><strong>"
highlight_end = "</strong></font>"
elif format_type == "feishu":
highlight_start = "<font color='red'>**"
highlight_end = "**</font>"
elif format_type == "dingtalk":
highlight_start = "**"
highlight_end = "**"
elif format_type == "wework":
highlight_start = "**"
highlight_end = "**"
elif format_type == "telegram":
highlight_start = "<b>"
highlight_end = "</b>"
else:
highlight_start = "**"
highlight_end = "**"
if min_rank <= rank_threshold:
if min_rank == max_rank:
return f"{highlight_start}[{min_rank}]{highlight_end}"
else:
return f"{highlight_start}[{min_rank} - {max_rank}]{highlight_end}"
else:
if min_rank == max_rank:
return f"[{min_rank}]"
else:
return f"[{min_rank} - {max_rank}]"
def count_word_frequency(
results: Dict,
word_groups: List[Dict],
filter_words: List[str],
id_to_name: Dict,
title_info: Optional[Dict] = None,
rank_threshold: int = CONFIG["RANK_THRESHOLD"],
new_titles: Optional[Dict] = None,
mode: str = "daily",
) -> Tuple[List[Dict], int]:
"""统计词频,支持必须词、频率词、过滤词,并标记新增标题"""
# 如果没有配置词组,创建一个包含所有新闻的虚拟词组
if not word_groups:
print("频率词配置为空,将显示所有新闻")
word_groups = [{"required": [], "normal": [], "group_key": "全部新闻"}]
        filter_words = []  # Clear filter words so all news is shown
is_first_today = is_first_crawl_today()
    # Decide which data to process and how to mark items as new
if mode == "incremental":
if is_first_today:
            # Incremental mode + first crawl of the day: process all news, mark everything as new
results_to_process = results
all_news_are_new = True
else:
            # Incremental mode + later crawls: process only the newly added news
results_to_process = new_titles if new_titles else {}
all_news_are_new = True
elif mode == "current":
        # current mode: process only the latest batch, but statistics come from the full history
if title_info:
latest_time = None
for source_titles in title_info.values():
for title_data in source_titles.values():
last_time = title_data.get("last_time", "")
if last_time:
if latest_time is None or last_time > latest_time:
latest_time = last_time
            # Keep only news whose last_time equals the latest time
if latest_time:
results_to_process = {}
for source_id, source_titles in results.items():
if source_id in title_info:
filtered_titles = {}
for title, title_data in source_titles.items():
if title in title_info[source_id]:
info = title_info[source_id][title]
if info.get("last_time") == latest_time:
filtered_titles[title] = title_data
if filtered_titles:
results_to_process[source_id] = filtered_titles
print(
f"当前榜单模式:最新时间 {latest_time},筛选出 {sum(len(titles) for titles in results_to_process.values())} 条当前榜单新闻"
)
else:
results_to_process = results
else:
results_to_process = results
all_news_are_new = False
else:
        # Daily summary mode: process all news
results_to_process = results
all_news_are_new = False
total_input_news = sum(len(titles) for titles in results.values())
filter_status = (
"全部显示"
if len(word_groups) == 1 and word_groups[0]["group_key"] == "全部新闻"
else "频率词过滤"
)
print(f"当日汇总模式:处理 {total_input_news} 条新闻,模式:{filter_status}")
word_stats = {}
total_titles = 0
processed_titles = {}
matched_new_count = 0
if title_info is None:
title_info = {}
if new_titles is None:
new_titles = {}
for group in word_groups:
group_key = group["group_key"]
word_stats[group_key] = {"count": 0, "titles": {}}
for source_id, titles_data in results_to_process.items():
total_titles += len(titles_data)
if source_id not in processed_titles:
processed_titles[source_id] = {}
for title, title_data in titles_data.items():
if title in processed_titles.get(source_id, {}):
continue
            # Use the unified matching logic
matches_frequency_words = matches_word_groups(
title, word_groups, filter_words
)
if not matches_frequency_words:
continue
            # In incremental mode, or the first current-mode run of the day, count matched new items
if (mode == "incremental" and all_news_are_new) or (
mode == "current" and is_first_today
):
matched_new_count += 1
source_ranks = title_data.get("ranks", [])
source_url = title_data.get("url", "")
source_mobile_url = title_data.get("mobileUrl", "")
            # Find the matching word group (defensive conversion keeps this type-safe)
title_lower = str(title).lower() if not isinstance(title, str) else title.lower()
for group in word_groups:
required_words = group["required"]
normal_words = group["normal"]
# 如果是"全部新闻"模式,所有标题都匹配第一个(唯一的)词组
if len(word_groups) == 1 and word_groups[0]["group_key"] == "全部新闻":
group_key = group["group_key"]
word_stats[group_key]["count"] += 1
if source_id not in word_stats[group_key]["titles"]:
word_stats[group_key]["titles"][source_id] = []
else:
                    # Original matching logic
if required_words:
all_required_present = all(
req_word.lower() in title_lower
for req_word in required_words
)
if not all_required_present:
continue
if normal_words:
any_normal_present = any(
normal_word.lower() in title_lower
for normal_word in normal_words
)
if not any_normal_present:
continue
group_key = group["group_key"]
word_stats[group_key]["count"] += 1
if source_id not in word_stats[group_key]["titles"]:
word_stats[group_key]["titles"][source_id] = []
first_time = ""
last_time = ""
count_info = 1
ranks = source_ranks if source_ranks else []
url = source_url
mobile_url = source_mobile_url
                # For current mode, pull the complete record from the historical statistics
if (
mode == "current"
and title_info
and source_id in title_info
and title in title_info[source_id]
):
info = title_info[source_id][title]
first_time = info.get("first_time", "")
last_time = info.get("last_time", "")
count_info = info.get("count", 1)
if "ranks" in info and info["ranks"]:
ranks = info["ranks"]
url = info.get("url", source_url)
mobile_url = info.get("mobileUrl", source_mobile_url)
elif (
title_info
and source_id in title_info
and title in title_info[source_id]
):
info = title_info[source_id][title]
first_time = info.get("first_time", "")
last_time = info.get("last_time", "")
count_info = info.get("count", 1)
if "ranks" in info and info["ranks"]:
ranks = info["ranks"]
url = info.get("url", source_url)
mobile_url = info.get("mobileUrl", source_mobile_url)
if not ranks:
ranks = [99]
time_display = format_time_display(first_time, last_time)
source_name = id_to_name.get(source_id, source_id)
                # Decide whether this item counts as new
is_new = False
if all_news_are_new:
                    # In incremental mode everything processed is new; same for the day's first crawl
is_new = True
elif new_titles and source_id in new_titles:
                    # Check whether the title appears in the new-titles list
new_titles_for_source = new_titles[source_id]
is_new = title in new_titles_for_source
word_stats[group_key]["titles"][source_id].append(
{
"title": title,
"source_name": source_name,
"first_time": first_time,
"last_time": last_time,
"time_display": time_display,
"count": count_info,
"ranks": ranks,
"rank_threshold": rank_threshold,
"url": url,
"mobileUrl": mobile_url,
"is_new": is_new,
}
)
if source_id not in processed_titles:
processed_titles[source_id] = {}
processed_titles[source_id][title] = True
break
    # Finally print the consolidated summary
if mode == "incremental":
if is_first_today:
total_input_news = sum(len(titles) for titles in results.values())
filter_status = (
"全部显示"
if len(word_groups) == 1 and word_groups[0]["group_key"] == "全部新闻"
else "频率词匹配"
)
print(
f"增量模式:当天第一次爬取,{total_input_news} 条新闻中有 {matched_new_count} 条{filter_status}"
)
else:
if new_titles:
total_new_count = sum(len(titles) for titles in new_titles.values())
filter_status = (
"全部显示"
if len(word_groups) == 1
and word_groups[0]["group_key"] == "全部新闻"
else "匹配频率词"
)
print(
f"增量模式:{total_new_count} 条新增新闻中,有 {matched_new_count} 条{filter_status}"
)
if matched_new_count == 0 and len(word_groups) > 1:
print("增量模式:没有新增新闻匹配频率词,将不会发送通知")
else:
print("增量模式:未检测到新增新闻")
elif mode == "current":
total_input_news = sum(len(titles) for titles in results_to_process.values())
if is_first_today:
filter_status = (
"全部显示"
if len(word_groups) == 1 and word_groups[0]["group_key"] == "全部新闻"
else "频率词匹配"
)
print(
f"当前榜单模式:当天第一次爬取,{total_input_news} 条当前榜单新闻中有 {matched_new_count} 条{filter_status}"
)
else:
matched_count = sum(stat["count"] for stat in word_stats.values())
filter_status = (
"全部显示"
if len(word_groups) == 1 and word_groups[0]["group_key"] == "全部新闻"
else "频率词匹配"
)
print(
f"当前榜单模式:{total_input_news} 条当前榜单新闻中有 {matched_count} 条{filter_status}"
)
stats = []
    # Map each group_key to its position and maximum display count
group_key_to_position = {
group["group_key"]: idx for idx, group in enumerate(word_groups)
}
group_key_to_max_count = {
group["group_key"]: group.get("max_count", 0) for group in word_groups
}
for group_key, data in word_stats.items():
all_titles = []
for source_id, title_list in data["titles"].items():
all_titles.extend(title_list)
        # Sort by weight
sorted_titles = sorted(
all_titles,
key=lambda x: (
-calculate_news_weight(x, rank_threshold),
min(x["ranks"]) if x["ranks"] else 999,
-x["count"],
),
)
        # Apply the display limit (per-group config takes priority over the global one)
group_max_count = group_key_to_max_count.get(group_key, 0)
if group_max_count == 0:
            # Fall back to the global configuration
group_max_count = CONFIG.get("MAX_NEWS_PER_KEYWORD", 0)
if group_max_count > 0:
sorted_titles = sorted_titles[:group_max_count]
stats.append(
{
"word": group_key,
"count": data["count"],
"position": group_key_to_position.get(group_key, 999),
"titles": sorted_titles,
"percentage": (
round(data["count"] / total_titles * 100, 2)
if total_titles > 0
else 0
),
}
)
    # Choose the sort priority according to configuration
if CONFIG.get("SORT_BY_POSITION_FIRST", False):
        # Configured position first, then hot-item count
stats.sort(key=lambda x: (x["position"], -x["count"]))
else:
        # Hot-item count first, then configured position (original behaviour)
stats.sort(key=lambda x: (-x["count"], x["position"]))
return stats, total_titles
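# Ordering note: within a group, titles sort by descending weight, then best
# (lowest) rank, then descending count; the groups themselves sort by count or
# by configured position, depending on CONFIG["SORT_BY_POSITION_FIRST"].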
# === Report generation ===
def prepare_report_data(
stats: List[Dict],
failed_ids: Optional[List] = None,
new_titles: Optional[Dict] = None,
id_to_name: Optional[Dict] = None,
mode: str = "daily",
) -> Dict:
"""准备报告数据"""
processed_new_titles = []
    # Hide the new-items section in incremental mode
hide_new_section = mode == "incremental"
    # Only build the new-items section when it is not hidden
if not hide_new_section:
filtered_new_titles = {}
if new_titles and id_to_name:
word_groups, filter_words = load_frequency_words()
for source_id, titles_data in new_titles.items():
filtered_titles = {}
for title, title_data in titles_data.items():
if matches_word_groups(title, word_groups, filter_words):
filtered_titles[title] = title_data
if filtered_titles:
filtered_new_titles[source_id] = filtered_titles
if filtered_new_titles and id_to_name:
for source_id, titles_data in filtered_new_titles.items():
source_name = id_to_name.get(source_id, source_id)
source_titles = []
for title, title_data in titles_data.items():
url = title_data.get("url", "")
mobile_url = title_data.get("mobileUrl", "")
ranks = title_data.get("ranks", [])
processed_title = {
"title": title,
"source_name": source_name,
"time_display": "",
"count": 1,
"ranks": ranks,
"rank_threshold": CONFIG["RANK_THRESHOLD"],
"url": url,
"mobile_url": mobile_url,
"is_new": True,
}
source_titles.append(processed_title)
if source_titles:
processed_new_titles.append(
{
"source_id": source_id,
"source_name": source_name,
"titles": source_titles,
}
)
processed_stats = []
for stat in stats:
if stat["count"] <= 0:
continue
processed_titles = []
for title_data in stat["titles"]:
processed_title = {
"title": title_data["title"],
"source_name": title_data["source_name"],
"time_display": title_data["time_display"],
"count": title_data["count"],
"ranks": title_data["ranks"],
"rank_threshold": title_data["rank_threshold"],
"url": title_data.get("url", ""),
"mobile_url": title_data.get("mobileUrl", ""),
"is_new": title_data.get("is_new", False),
}
processed_titles.append(processed_title)
processed_stats.append(
{
"word": stat["word"],
"count": stat["count"],
"percentage": stat.get("percentage", 0),
"titles": processed_titles,
}
)
return {
"stats": processed_stats,
"new_titles": processed_new_titles,
"failed_ids": failed_ids or [],
"total_new_count": sum(
len(source["titles"]) for source in processed_new_titles
),
}
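# Shape sketch of the returned dict (hypothetical values):
#   {"stats": [{"word": "AI", "count": 2, "percentage": 3.5, "titles": [...]}],
#    "new_titles": [{"source_id": "...", "source_name": "...", "titles": [...]}],
#    "failed_ids": [],
#    "total_new_count": 0}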
def format_title_for_platform(
platform: str, title_data: Dict, show_source: bool = True
) -> str:
"""统一的标题格式化方法"""
rank_display = format_rank_display(
title_data["ranks"], title_data["rank_threshold"], platform
)
link_url = title_data["mobile_url"] or title_data["url"]
cleaned_title = clean_title(title_data["title"])
if platform == "feishu":
if link_url:
formatted_title = f"[{cleaned_title}]({link_url})"
else:
formatted_title = cleaned_title
title_prefix = "🆕 " if title_data.get("is_new") else ""
if show_source:
result = f"<font color='grey'>[{title_data['source_name']}]</font> {title_prefix}{formatted_title}"
else:
result = f"{title_prefix}{formatted_title}"
if rank_display:
result += f" {rank_display}"
if title_data["time_display"]:
result += f" <font color='grey'>- {title_data['time_display']}</font>"
if title_data["count"] > 1:
result += f" <font color='green'>({title_data['count']}次)</font>"
return result
elif platform == "dingtalk":
if link_url:
formatted_title = f"[{cleaned_title}]({link_url})"
else:
formatted_title = cleaned_title
title_prefix = "🆕 " if title_data.get("is_new") else ""
if show_source:
result = f"[{title_data['source_name']}] {title_prefix}{formatted_title}"
else:
result = f"{title_prefix}{formatted_title}"
if rank_display:
result += f" {rank_display}"
if title_data["time_display"]:
result += f" - {title_data['time_display']}"
if title_data["count"] > 1:
result += f" ({title_data['count']}次)"
return result
elif platform == "wework":
if link_url:
formatted_title = f"[{cleaned_title}]({link_url})"
else:
formatted_title = cleaned_title
title_prefix = "🆕 " if title_data.get("is_new") else ""
if show_source:
result = f"[{title_data['source_name']}] {title_prefix}{formatted_title}"
else:
result = f"{title_prefix}{formatted_title}"
if rank_display:
result += f" {rank_display}"
if title_data["time_display"]:
result += f" - {title_data['time_display']}"
if title_data["count"] > 1:
result += f" ({title_data['count']}次)"
return result
elif platform == "telegram":
if link_url:
formatted_title = f'<a href="{link_url}">{html_escape(cleaned_title)}</a>'
else:
formatted_title = cleaned_title
title_prefix = "🆕 " if title_data.get("is_new") else ""
if show_source:
result = f"[{title_data['source_name']}] {title_prefix}{formatted_title}"
else:
result = f"{title_prefix}{formatted_title}"
if rank_display:
result += f" {rank_display}"
if title_data["time_display"]:
result += f" <code>- {title_data['time_display']}</code>"
if title_data["count"] > 1:
result += f" <code>({title_data['count']}次)</code>"
return result
elif platform == "ntfy":
if link_url:
formatted_title = f"[{cleaned_title}]({link_url})"
else:
formatted_title = cleaned_title
title_prefix = "🆕 " if title_data.get("is_new") else ""
if show_source:
result = f"[{title_data['source_name']}] {title_prefix}{formatted_title}"
else:
result = f"{title_prefix}{formatted_title}"
if rank_display:
result += f" {rank_display}"
if title_data["time_display"]:
result += f" `- {title_data['time_display']}`"
if title_data["count"] > 1:
result += f" `({title_data['count']}次)`"
return result
elif platform == "html":
rank_display = format_rank_display(
title_data["ranks"], title_data["rank_threshold"], "html"
)
link_url = title_data["mobile_url"] or title_data["url"]
escaped_title = html_escape(cleaned_title)
escaped_source_name = html_escape(title_data["source_name"])
if link_url:
escaped_url = html_escape(link_url)
formatted_title = f'[{escaped_source_name}] <a href="{escaped_url}" target="_blank" class="news-link">{escaped_title}</a>'
else:
formatted_title = (
f'[{escaped_source_name}] <span class="no-link">{escaped_title}</span>'
)
if rank_display:
formatted_title += f" {rank_display}"
if title_data["time_display"]:
escaped_time = html_escape(title_data["time_display"])
formatted_title += f" <font color='grey'>- {escaped_time}</font>"
if title_data["count"] > 1:
formatted_title += f" <font color='green'>({title_data['count']}次)</font>"
if title_data.get("is_new"):
formatted_title = f"<div class='new-title'>🆕 {formatted_title}</div>"
return formatted_title
else:
return cleaned_title
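# Feishu example (hypothetical data): a linked title from 微博, seen twice with
# ranks [1, 3] under threshold 10, renders roughly as
#   <font color='grey'>[微博]</font> [标题](https://example.com)
#   <font color='red'>**[1 - 3]**</font> <font color='green'>(2次)</font>
# The other platforms follow the same structure with their own markup.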
def generate_html_report(
stats: List[Dict],
total_titles: int,
failed_ids: Optional[List] = None,
new_titles: Optional[Dict] = None,
id_to_name: Optional[Dict] = None,
mode: str = "daily",
is_daily_summary: bool = False,
update_info: Optional[Dict] = None,
) -> str:
"""生成HTML报告"""
if is_daily_summary:
if mode == "current":
filename = "当前榜单汇总.html"
elif mode == "incremental":
filename = "当日增量.html"
else:
filename = "当日汇总.html"
else:
filename = f"{format_time_filename()}.html"
file_path = get_output_path("html", filename)
report_data = prepare_report_data(stats, failed_ids, new_titles, id_to_name, mode)
html_content = render_html_content(
report_data, total_titles, is_daily_summary, mode, update_info
)
with open(file_path, "w", encoding="utf-8") as f:
f.write(html_content)
if is_daily_summary:
root_file_path = Path("index.html")
with open(root_file_path, "w", encoding="utf-8") as f:
f.write(html_content)
return file_path
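# Usage sketch: generate_html_report(stats, total_titles, mode="daily",
# is_daily_summary=True) writes 当日汇总.html into the dated output folder and
# mirrors the same content to index.html in the working directory.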
def render_html_content(
report_data: Dict,
total_titles: int,
is_daily_summary: bool = False,
mode: str = "daily",
update_info: Optional[Dict] = None,
) -> str:
"""渲染HTML内容"""
html = """
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>热点新闻分析</title>
<script src="https://cdnjs.cloudflare.com/ajax/libs/html2canvas/1.4.1/html2canvas.min.js" integrity="sha512-BNaRQnYJYiPSqHHDb58B0yaPfCu+Wgds8Gp/gU33kqBtgNS4tSPHuGibyoeqMV/TJlSKda6FXzoEyYGjTe+vXA==" crossorigin="anonymous" referrerpolicy="no-referrer"></script>
<style>
* { box-sizing: border-box; }
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', system-ui, sans-serif;
margin: 0;
padding: 16px;
background: #fafafa;
color: #333;
line-height: 1.5;
}
.container {
max-width: 600px;
margin: 0 auto;
background: white;
border-radius: 12px;
overflow: hidden;
box-shadow: 0 2px 16px rgba(0,0,0,0.06);
}
.header {
background: linear-gradient(135deg, #4f46e5 0%, #7c3aed 100%);
color: white;
padding: 32px 24px;
text-align: center;
position: relative;
}
.save-buttons {
position: absolute;
top: 16px;
right: 16px;
display: flex;
gap: 8px;
}
.save-btn {
background: rgba(255, 255, 255, 0.2);
border: 1px solid rgba(255, 255, 255, 0.3);
color: white;
padding: 8px 16px;
border-radius: 6px;
cursor: pointer;
font-size: 13px;
font-weight: 500;
transition: all 0.2s ease;
backdrop-filter: blur(10px);
white-space: nowrap;
}
.save-btn:hover {
background: rgba(255, 255, 255, 0.3);
border-color: rgba(255, 255, 255, 0.5);
transform: translateY(-1px);
}
.save-btn:active {
transform: translateY(0);
}
.save-btn:disabled {
opacity: 0.6;
cursor: not-allowed;
}
.header-title {
font-size: 22px;
font-weight: 700;
margin: 0 0 20px 0;
}
.header-info {
display: grid;
grid-template-columns: 1fr 1fr;
gap: 16px;
font-size: 14px;
opacity: 0.95;
}
.info-item {
text-align: center;
}
.info-label {
display: block;
font-size: 12px;
opacity: 0.8;
margin-bottom: 4px;
}
.info-value {
font-weight: 600;
font-size: 16px;
}
.content {
padding: 24px;
}
.word-group {
margin-bottom: 40px;
}
.word-group:first-child {
margin-top: 0;
}
.word-header {
display: flex;
align-items: center;
justify-content: space-between;
margin-bottom: 20px;
padding-bottom: 8px;
border-bottom: 1px solid #f0f0f0;
}
.word-info {
display: flex;
align-items: center;
gap: 12px;
}
.word-name {
font-size: 17px;
font-weight: 600;
color: #1a1a1a;
}
.word-count {
color: #666;
font-size: 13px;
font-weight: 500;
}
.word-count.hot { color: #dc2626; font-weight: 600; }
.word-count.warm { color: #ea580c; font-weight: 600; }
.word-index {
color: #999;
font-size: 12px;
}
.news-item {
margin-bottom: 20px;
padding: 16px 0;
border-bottom: 1px solid #f5f5f5;
position: relative;
display: flex;
gap: 12px;
align-items: center;
}
.news-item:last-child {
border-bottom: none;
}
.news-item.new::after {
content: "NEW";
position: absolute;
top: 12px;
right: 0;
background: #fbbf24;
color: #92400e;
font-size: 9px;
font-weight: 700;
padding: 3px 6px;
border-radius: 4px;
letter-spacing: 0.5px;
}
.news-number {
color: #999;
font-size: 13px;
font-weight: 600;
min-width: 20px;
text-align: center;
flex-shrink: 0;
background: #f8f9fa;
border-radius: 50%;
width: 24px;
height: 24px;
display: flex;
align-items: center;
justify-content: center;
align-self: flex-start;
margin-top: 8px;
}
.news-content {
flex: 1;
min-width: 0;
padding-right: 40px;
}
.news-item.new .news-content {
padding-right: 50px;
}
.news-header {
display: flex;
align-items: center;
gap: 8px;
margin-bottom: 8px;
flex-wrap: wrap;
}
.source-name {
color: #666;
font-size: 12px;
font-weight: 500;
}
.rank-num {
color: #fff;
background: #6b7280;
font-size: 10px;
font-weight: 700;
padding: 2px 6px;
border-radius: 10px;
min-width: 18px;
text-align: center;
}
.rank-num.top { background: #dc2626; }
.rank-num.high { background: #ea580c; }
.time-info {
color: #999;
font-size: 11px;
}
.count-info {
color: #059669;
font-size: 11px;
font-weight: 500;
}
.news-title {
font-size: 15px;
line-height: 1.4;
color: #1a1a1a;
margin: 0;
}
.news-link {
color: #2563eb;
text-decoration: none;
}
.news-link:hover {
text-decoration: underline;
}
.news-link:visited {
color: #7c3aed;
}
.new-section {
margin-top: 40px;
padding-top: 24px;
border-top: 2px solid #f0f0f0;
}
.new-section-title {
color: #1a1a1a;
font-size: 16px;
font-weight: 600;
margin: 0 0 20px 0;
}
.new-source-group {
margin-bottom: 24px;
}
.new-source-title {
color: #666;
font-size: 13px;
font-weight: 500;
margin: 0 0 12px 0;
padding-bottom: 6px;
border-bottom: 1px solid #f5f5f5;
}
.new-item {
display: flex;
align-items: center;
gap: 12px;
padding: 8px 0;
border-bottom: 1px solid #f9f9f9;
}
.new-item:last-child {
border-bottom: none;
}
.new-item-number {
color: #999;
font-size: 12px;
font-weight: 600;
min-width: 18px;
text-align: center;
flex-shrink: 0;
background: #f8f9fa;
border-radius: 50%;
width: 20px;
height: 20px;
display: flex;
align-items: center;
justify-content: center;
}
.new-item-rank {
color: #fff;
background: #6b7280;
font-size: 10px;
font-weight: 700;
padding: 3px 6px;
border-radius: 8px;
min-width: 20px;
text-align: center;
flex-shrink: 0;
}
.new-item-rank.top { background: #dc2626; }
.new-item-rank.high { background: #ea580c; }
.new-item-content {
flex: 1;
min-width: 0;
}
.new-item-title {
font-size: 14px;
line-height: 1.4;
color: #1a1a1a;
margin: 0;
}
.error-section {
background: #fef2f2;
border: 1px solid #fecaca;
border-radius: 8px;
padding: 16px;
margin-bottom: 24px;
}
.error-title {
color: #dc2626;
font-size: 14px;
font-weight: 600;
margin: 0 0 8px 0;
}
.error-list {
list-style: none;
padding: 0;
margin: 0;
}
.error-item {
color: #991b1b;
font-size: 13px;
padding: 2px 0;
font-family: 'SF Mono', Consolas, monospace;
}
.footer {
margin-top: 32px;
padding: 20px 24px;
background: #f8f9fa;
border-top: 1px solid #e5e7eb;
text-align: center;
}
.footer-content {
font-size: 13px;
color: #6b7280;
line-height: 1.6;
}
.footer-link {
color: #4f46e5;
text-decoration: none;
font-weight: 500;
transition: color 0.2s ease;
}
.footer-link:hover {
color: #7c3aed;
text-decoration: underline;
}
.project-name {
font-weight: 600;
color: #374151;
}
@media (max-width: 480px) {
body { padding: 12px; }
.header { padding: 24px 20px; }
.content { padding: 20px; }
.footer { padding: 16px 20px; }
.header-info { grid-template-columns: 1fr; gap: 12px; }
.news-header { gap: 6px; }
.news-content { padding-right: 45px; }
.news-item { gap: 8px; }
.new-item { gap: 8px; }
.news-number { width: 20px; height: 20px; font-size: 12px; }
.save-buttons {
position: static;
margin-bottom: 16px;
display: flex;
gap: 8px;
justify-content: center;
flex-direction: column;
width: 100%;
}
.save-btn {
width: 100%;
}
}
</style>
</head>
<body>
<div class="container">
<div class="header">
<div class="save-buttons">
<button class="save-btn" onclick="saveAsImage()">保存为图片</button>
<button class="save-btn" onclick="saveAsMultipleImages()">分段保存</button>
</div>
<div class="header-title">热点新闻分析</div>
<div class="header-info">
<div class="info-item">
<span class="info-label">报告类型</span>
<span class="info-value">"""
    # Report-type label
if is_daily_summary:
if mode == "current":
html += "当前榜单"
elif mode == "incremental":
html += "增量模式"
else:
html += "当日汇总"
else:
html += "实时分析"
html += """</span>
</div>
<div class="info-item">
<span class="info-label">新闻总数</span>
<span class="info-value">"""
html += f"{total_titles} 条"
    # Count hot news items after filtering
hot_news_count = sum(len(stat["titles"]) for stat in report_data["stats"])
html += """</span>
</div>
<div class="info-item">
<span class="info-label">热点新闻</span>
<span class="info-value">"""
html += f"{hot_news_count} 条"
html += """</span>
</div>
<div class="info-item">
<span class="info-label">生成时间</span>
<span class="info-value">"""
now = get_beijing_time()
html += now.strftime("%m-%d %H:%M")
html += """</span>
</div>
</div>
</div>
<div class="content">"""
    # Error block for platforms whose requests failed
if report_data["failed_ids"]:
html += """
<div class="error-section">
<div class="error-title">⚠️ 请求失败的平台</div>
<ul class="error-list">"""
for id_value in report_data["failed_ids"]:
html += f'<li class="error-item">{html_escape(id_value)}</li>'
html += """
</ul>
</div>"""
    # Main word-group statistics
if report_data["stats"]:
total_count = len(report_data["stats"])
for i, stat in enumerate(report_data["stats"], 1):
count = stat["count"]
            # Determine the heat level
if count >= 10:
count_class = "hot"
elif count >= 5:
count_class = "warm"
else:
count_class = ""
escaped_word = html_escape(stat["word"])
html += f"""
<div class="word-group">
<div class="word-header">
<div class="word-info">
<div class="word-name">{escaped_word}</div>
<div class="word-count {count_class}">{count} 条</div>
</div>
<div class="word-index">{i}/{total_count}</div>
</div>"""
            # Render each news title in the group, numbering every item
for j, title_data in enumerate(stat["titles"], 1):
is_new = title_data.get("is_new", False)
new_class = "new" if is_new else ""
html += f"""
<div class="news-item {new_class}">
<div class="news-number">{j}</div>
<div class="news-content">
<div class="news-header">
<span class="source-name">{html_escape(title_data["source_name"])}</span>"""
                # Rank display
ranks = title_data.get("ranks", [])
if ranks:
min_rank = min(ranks)
max_rank = max(ranks)
rank_threshold = title_data.get("rank_threshold", 10)
                    # Determine the rank class
if min_rank <= 3:
rank_class = "top"
elif min_rank <= rank_threshold:
rank_class = "high"
else:
rank_class = ""
if min_rank == max_rank:
rank_text = str(min_rank)
else:
rank_text = f"{min_rank}-{max_rank}"
html += f'<span class="rank-num {rank_class}">{rank_text}</span>'
                # Time display
time_display = title_data.get("time_display", "")
if time_display:
                    # Simplify the time format: collapse " ~ " and strip the brackets
simplified_time = (
time_display.replace(" ~ ", "~")
.replace("[", "")
.replace("]", "")
)
html += (
f'<span class="time-info">{html_escape(simplified_time)}</span>'
)
                # Appearance count
count_info = title_data.get("count", 1)
if count_info > 1:
html += f'<span class="count-info">{count_info}次</span>'
html += """
</div>
<div class="news-title">"""
                # Title and link
escaped_title = html_escape(title_data["title"])
link_url = title_data.get("mobile_url") or title_data.get("url", "")
if link_url:
escaped_url = html_escape(link_url)
html += f'<a href="{escaped_url}" target="_blank" class="news-link">{escaped_title}</a>'
else:
html += escaped_title
html += """
</div>
</div>
</div>"""
html += """
</div>"""
    # New-items section
if report_data["new_titles"]:
html += f"""
<div class="new-section">
<div class="new-section-title">本次新增热点 (共 {report_data['total_new_count']} 条)</div>"""
for source_data in report_data["new_titles"]:
escaped_source = html_escape(source_data["source_name"])
titles_count = len(source_data["titles"])
html += f"""
<div class="new-source-group">
<div class="new-source-title">{escaped_source} · {titles_count}条</div>"""
            # Number the new items as well
for idx, title_data in enumerate(source_data["titles"], 1):
ranks = title_data.get("ranks", [])
                # Rank display for new items
rank_class = ""
if ranks:
min_rank = min(ranks)
if min_rank <= 3:
rank_class = "top"
elif min_rank <= title_data.get("rank_threshold", 10):
rank_class = "high"
if len(ranks) == 1:
rank_text = str(ranks[0])
else:
rank_text = f"{min(ranks)}-{max(ranks)}"
else:
rank_text = "?"
html += f"""
<div class="new-item">
<div class="new-item-number">{idx}</div>
<div class="new-item-rank {rank_class}">{rank_text}</div>
<div class="new-item-content">
<div class="new-item-title">"""
                # Link handling for new items
escaped_title = html_escape(title_data["title"])
link_url = title_data.get("mobile_url") or title_data.get("url", "")
if link_url:
escaped_url = html_escape(link_url)
html += f'<a href="{escaped_url}" target="_blank" class="news-link">{escaped_title}</a>'
else:
html += escaped_title
html += """
</div>
</div>
</div>"""
html += """
</div>"""
html += """
</div>"""
html += """
</div>
<div class="footer">
<div class="footer-content">
由 <span class="project-name">TrendRadar</span> 生成 ·
<a href="https://github.com/sansan0/TrendRadar" target="_blank" class="footer-link">
GitHub 开源项目
</a>"""
if update_info:
html += f"""
<br>
<span style="color: #ea580c; font-weight: 500;">
发现新版本 {update_info['remote_version']},当前版本 {update_info['current_version']}
</span>"""
html += """
</div>
</div>
</div>
<script>
async function saveAsImage() {
const button = event.target;
const originalText = button.textContent;
try {
button.textContent = '生成中...';
button.disabled = true;
window.scrollTo(0, 0);
                // Wait for the page to settle
await new Promise(resolve => setTimeout(resolve, 200));
                // Hide the buttons before capturing
const buttons = document.querySelector('.save-buttons');
buttons.style.visibility = 'hidden';
                // Wait again so the buttons are fully hidden
await new Promise(resolve => setTimeout(resolve, 100));
const container = document.querySelector('.container');
const canvas = await html2canvas(container, {
backgroundColor: '#ffffff',
scale: 1.5,
useCORS: true,
allowTaint: false,
imageTimeout: 10000,
removeContainer: false,
foreignObjectRendering: false,
logging: false,
width: container.offsetWidth,
height: container.offsetHeight,
x: 0,
y: 0,
scrollX: 0,
scrollY: 0,
windowWidth: window.innerWidth,
windowHeight: window.innerHeight
});
buttons.style.visibility = 'visible';
const link = document.createElement('a');
const now = new Date();
const filename = `TrendRadar_热点新闻分析_${now.getFullYear()}${String(now.getMonth() + 1).padStart(2, '0')}${String(now.getDate()).padStart(2, '0')}_${String(now.getHours()).padStart(2, '0')}${String(now.getMinutes()).padStart(2, '0')}.png`;
link.download = filename;
link.href = canvas.toDataURL('image/png', 1.0);
                // Trigger the download
document.body.appendChild(link);
link.click();
document.body.removeChild(link);
button.textContent = '保存成功!';
setTimeout(() => {
button.textContent = originalText;
button.disabled = false;
}, 2000);
} catch (error) {
const buttons = document.querySelector('.save-buttons');
buttons.style.visibility = 'visible';
button.textContent = '保存失败';
setTimeout(() => {
button.textContent = originalText;
button.disabled = false;
}, 2000);
}
}
async function saveAsMultipleImages() {
const button = event.target;
const originalText = button.textContent;
const container = document.querySelector('.container');
const scale = 1.5;
const maxHeight = 5000 / scale;
try {
button.textContent = '分析中...';
button.disabled = true;
                // Collect every element that can serve as a split point
const newsItems = Array.from(container.querySelectorAll('.news-item'));
const wordGroups = Array.from(container.querySelectorAll('.word-group'));
const newSection = container.querySelector('.new-section');
const errorSection = container.querySelector('.error-section');
const header = container.querySelector('.header');
const footer = container.querySelector('.footer');
                // Compute element positions and heights
const containerRect = container.getBoundingClientRect();
const elements = [];
                // The header is always included
elements.push({
type: 'header',
element: header,
top: 0,
bottom: header.offsetHeight,
height: header.offsetHeight
});
                // Include the error section if present
if (errorSection) {
const rect = errorSection.getBoundingClientRect();
elements.push({
type: 'error',
element: errorSection,
top: rect.top - containerRect.top,
bottom: rect.bottom - containerRect.top,
height: rect.height
});
}
                // Walk news-items grouped by word-group
wordGroups.forEach(group => {
const groupRect = group.getBoundingClientRect();
const groupNewsItems = group.querySelectorAll('.news-item');
                    // Include the word-group header
const wordHeader = group.querySelector('.word-header');
if (wordHeader) {
const headerRect = wordHeader.getBoundingClientRect();
elements.push({
type: 'word-header',
element: wordHeader,
parent: group,
top: groupRect.top - containerRect.top,
bottom: headerRect.bottom - containerRect.top,
height: headerRect.height
});
}
                    // Include each news-item
groupNewsItems.forEach(item => {
const rect = item.getBoundingClientRect();
elements.push({
type: 'news-item',
element: item,
parent: group,
top: rect.top - containerRect.top,
bottom: rect.bottom - containerRect.top,
height: rect.height
});
});
});
                // Include the new-items section
if (newSection) {
const rect = newSection.getBoundingClientRect();
elements.push({
type: 'new-section',
element: newSection,
top: rect.top - containerRect.top,
bottom: rect.bottom - containerRect.top,
height: rect.height
});
}
                // Include the footer
const footerRect = footer.getBoundingClientRect();
elements.push({
type: 'footer',
element: footer,
top: footerRect.top - containerRect.top,
bottom: footerRect.bottom - containerRect.top,
height: footer.offsetHeight
});
                // Compute the split points
const segments = [];
let currentSegment = { start: 0, end: 0, height: 0, includeHeader: true };
let headerHeight = header.offsetHeight;
currentSegment.height = headerHeight;
for (let i = 1; i < elements.length; i++) {
const element = elements[i];
const potentialHeight = element.bottom - currentSegment.start;
                    // Check whether a new segment is needed
if (potentialHeight > maxHeight && currentSegment.height > headerHeight) {
                        // Split at the end of the previous element
currentSegment.end = elements[i - 1].bottom;
segments.push(currentSegment);
                        // Start a new segment
currentSegment = {
start: currentSegment.end,
end: 0,
height: element.bottom - currentSegment.end,
includeHeader: false
};
} else {
currentSegment.height = potentialHeight;
currentSegment.end = element.bottom;
}
}
                // Append the final segment
if (currentSegment.height > 0) {
currentSegment.end = container.offsetHeight;
segments.push(currentSegment);
}
button.textContent = `生成中 (0/${segments.length})...`;
                // Hide the save buttons
const buttons = document.querySelector('.save-buttons');
buttons.style.visibility = 'hidden';
                // Generate an image for each segment
const images = [];
for (let i = 0; i < segments.length; i++) {
const segment = segments[i];
button.textContent = `生成中 (${i + 1}/${segments.length})...`;
                    // Create a temporary container for the capture
const tempContainer = document.createElement('div');
tempContainer.style.cssText = `
position: absolute;
left: -9999px;
top: 0;
width: ${container.offsetWidth}px;
background: white;
`;
tempContainer.className = 'container';
                    // Clone the container content
const clonedContainer = container.cloneNode(true);
                    // Remove the save buttons from the clone
const clonedButtons = clonedContainer.querySelector('.save-buttons');
if (clonedButtons) {
clonedButtons.style.display = 'none';
}
tempContainer.appendChild(clonedContainer);
document.body.appendChild(tempContainer);
                    // Wait for the DOM to update
await new Promise(resolve => setTimeout(resolve, 100));
                    // Capture the target region with html2canvas
const canvas = await html2canvas(clonedContainer, {
backgroundColor: '#ffffff',
scale: scale,
useCORS: true,
allowTaint: false,
imageTimeout: 10000,
logging: false,
width: container.offsetWidth,
height: segment.end - segment.start,
x: 0,
y: segment.start,
windowWidth: window.innerWidth,
windowHeight: window.innerHeight
});
images.push(canvas.toDataURL('image/png', 1.0));
                    // Clean up the temporary container
document.body.removeChild(tempContainer);
}
                // Restore the buttons
buttons.style.visibility = 'visible';
                // Download all images
const now = new Date();
const baseFilename = `TrendRadar_热点新闻分析_${now.getFullYear()}${String(now.getMonth() + 1).padStart(2, '0')}${String(now.getDate()).padStart(2, '0')}_${String(now.getHours()).padStart(2, '0')}${String(now.getMinutes()).padStart(2, '0')}`;
for (let i = 0; i < images.length; i++) {
const link = document.createElement('a');
link.download = `${baseFilename}_part${i + 1}.png`;
link.href = images[i];
document.body.appendChild(link);
link.click();
document.body.removeChild(link);
                    // Small delay so the browser doesn't block multiple downloads
await new Promise(resolve => setTimeout(resolve, 100));
}
button.textContent = `已保存 ${segments.length} 张图片!`;
setTimeout(() => {
button.textContent = originalText;
button.disabled = false;
}, 2000);
} catch (error) {
console.error('分段保存失败:', error);
const buttons = document.querySelector('.save-buttons');
buttons.style.visibility = 'visible';
button.textContent = '保存失败';
setTimeout(() => {
button.textContent = originalText;
button.disabled = false;
}, 2000);
}
}
document.addEventListener('DOMContentLoaded', function() {
window.scrollTo(0, 0);
});
</script>
</body>
</html>
"""
return html
def render_feishu_content(
report_data: Dict, update_info: Optional[Dict] = None, mode: str = "daily"
) -> str:
"""渲染飞书内容"""
text_content = ""
if report_data["stats"]:
text_content += f"📊 **热点词汇统计**\n\n"
total_count = len(report_data["stats"])
for i, stat in enumerate(report_data["stats"]):
word = stat["word"]
count = stat["count"]
sequence_display = f"<font color='grey'>[{i + 1}/{total_count}]</font>"
if count >= 10:
text_content += f"🔥 {sequence_display} **{word}** : <font color='red'>{count}</font> 条\n\n"
elif count >= 5:
text_content += f"📈 {sequence_display} **{word}** : <font color='orange'>{count}</font> 条\n\n"
else:
text_content += f"📌 {sequence_display} **{word}** : {count} 条\n\n"
for j, title_data in enumerate(stat["titles"], 1):
formatted_title = format_title_for_platform(
"feishu", title_data, show_source=True
)
text_content += f" {j}. {formatted_title}\n"
if j < len(stat["titles"]):
text_content += "\n"
if i < len(report_data["stats"]) - 1:
text_content += f"\n{CONFIG['FEISHU_MESSAGE_SEPARATOR']}\n\n"
if not text_content:
if mode == "incremental":
mode_text = "增量模式下暂无新增匹配的热点词汇"
elif mode == "current":
mode_text = "当前榜单模式下暂无匹配的热点词汇"
else:
mode_text = "暂无匹配的热点词汇"
text_content = f"📭 {mode_text}\n\n"
if report_data["new_titles"]:
if text_content and "暂无匹配" not in text_content:
text_content += f"\n{CONFIG['FEISHU_MESSAGE_SEPARATOR']}\n\n"
text_content += (
f"🆕 **本次新增热点新闻** (共 {report_data['total_new_count']} 条)\n\n"
)
for source_data in report_data["new_titles"]:
text_content += (
f"**{source_data['source_name']}** ({len(source_data['titles'])} 条):\n"
)
for j, title_data in enumerate(source_data["titles"], 1):
title_data_copy = title_data.copy()
title_data_copy["is_new"] = False
formatted_title = format_title_for_platform(
"feishu", title_data_copy, show_source=False
)
text_content += f" {j}. {formatted_title}\n"
text_content += "\n"
if report_data["failed_ids"]:
if text_content and "暂无匹配" not in text_content:
text_content += f"\n{CONFIG['FEISHU_MESSAGE_SEPARATOR']}\n\n"
text_content += "⚠️ **数据获取失败的平台:**\n\n"
for i, id_value in enumerate(report_data["failed_ids"], 1):
text_content += f" • <font color='red'>{id_value}</font>\n"
now = get_beijing_time()
text_content += (
f"\n\n<font color='grey'>更新时间:{now.strftime('%Y-%m-%d %H:%M:%S')}</font>"
)
if update_info:
text_content += f"\n<font color='grey'>TrendRadar 发现新版本 {update_info['remote_version']},当前 {update_info['current_version']}</font>"
return text_content
def render_dingtalk_content(
report_data: Dict, update_info: Optional[Dict] = None, mode: str = "daily"
) -> str:
"""渲染钉钉内容"""
text_content = ""
total_titles = sum(
len(stat["titles"]) for stat in report_data["stats"] if stat["count"] > 0
)
now = get_beijing_time()
text_content += f"**总新闻数:** {total_titles}\n\n"
text_content += f"**时间:** {now.strftime('%Y-%m-%d %H:%M:%S')}\n\n"
text_content += f"**类型:** 热点分析报告\n\n"
text_content += "---\n\n"
if report_data["stats"]:
text_content += f"📊 **热点词汇统计**\n\n"
total_count = len(report_data["stats"])
for i, stat in enumerate(report_data["stats"]):
word = stat["word"]
count = stat["count"]
sequence_display = f"[{i + 1}/{total_count}]"
if count >= 10:
text_content += f"🔥 {sequence_display} **{word}** : **{count}** 条\n\n"
elif count >= 5:
text_content += f"📈 {sequence_display} **{word}** : **{count}** 条\n\n"
else:
text_content += f"📌 {sequence_display} **{word}** : {count} 条\n\n"
for j, title_data in enumerate(stat["titles"], 1):
formatted_title = format_title_for_platform(
"dingtalk", title_data, show_source=True
)
text_content += f" {j}. {formatted_title}\n"
if j < len(stat["titles"]):
text_content += "\n"
if i < len(report_data["stats"]) - 1:
text_content += f"\n---\n\n"
if not report_data["stats"]:
if mode == "incremental":
mode_text = "增量模式下暂无新增匹配的热点词汇"
elif mode == "current":
mode_text = "当前榜单模式下暂无匹配的热点词汇"
else:
mode_text = "暂无匹配的热点词汇"
text_content += f"📭 {mode_text}\n\n"
if report_data["new_titles"]:
if text_content and "暂无匹配" not in text_content:
text_content += f"\n---\n\n"
text_content += (
f"🆕 **本次新增热点新闻** (共 {report_data['total_new_count']} 条)\n\n"
)
for source_data in report_data["new_titles"]:
text_content += f"**{source_data['source_name']}** ({len(source_data['titles'])} 条):\n\n"
for j, title_data in enumerate(source_data["titles"], 1):
title_data_copy = title_data.copy()
title_data_copy["is_new"] = False
formatted_title = format_title_for_platform(
"dingtalk", title_data_copy, show_source=False
)
text_content += f" {j}. {formatted_title}\n"
text_content += "\n"
if report_data["failed_ids"]:
if text_content and "暂无匹配" not in text_content:
text_content += f"\n---\n\n"
text_content += "⚠️ **数据获取失败的平台:**\n\n"
for i, id_value in enumerate(report_data["failed_ids"], 1):
text_content += f" • **{id_value}**\n"
text_content += f"\n\n> 更新时间:{now.strftime('%Y-%m-%d %H:%M:%S')}"
if update_info:
text_content += f"\n> TrendRadar 发现新版本 **{update_info['remote_version']}**,当前 **{update_info['current_version']}**"
return text_content
def split_content_into_batches(
report_data: Dict,
format_type: str,
update_info: Optional[Dict] = None,
    max_bytes: Optional[int] = None,
mode: str = "daily",
) -> List[str]:
"""分批处理消息内容,确保词组标题+至少第一条新闻的完整性"""
if max_bytes is None:
if format_type == "dingtalk":
max_bytes = CONFIG.get("DINGTALK_BATCH_SIZE", 20000)
elif format_type == "feishu":
max_bytes = CONFIG.get("FEISHU_BATCH_SIZE", 29000)
elif format_type == "ntfy":
max_bytes = 3800
else:
max_bytes = CONFIG.get("MESSAGE_BATCH_SIZE", 4000)
batches = []
total_titles = sum(
len(stat["titles"]) for stat in report_data["stats"] if stat["count"] > 0
)
now = get_beijing_time()
base_header = ""
if format_type == "wework":
base_header = f"**总新闻数:** {total_titles}\n\n\n\n"
elif format_type == "telegram":
base_header = f"总新闻数: {total_titles}\n\n"
elif format_type == "ntfy":
base_header = f"**总新闻数:** {total_titles}\n\n"
elif format_type == "feishu":
base_header = ""
elif format_type == "dingtalk":
base_header = f"**总新闻数:** {total_titles}\n\n"
base_header += f"**时间:** {now.strftime('%Y-%m-%d %H:%M:%S')}\n\n"
base_header += f"**类型:** 热点分析报告\n\n"
base_header += "---\n\n"
base_footer = ""
if format_type == "wework":
base_footer = f"\n\n\n> 更新时间:{now.strftime('%Y-%m-%d %H:%M:%S')}"
if update_info:
base_footer += f"\n> TrendRadar 发现新版本 **{update_info['remote_version']}**,当前 **{update_info['current_version']}**"
elif format_type == "telegram":
base_footer = f"\n\n更新时间:{now.strftime('%Y-%m-%d %H:%M:%S')}"
if update_info:
base_footer += f"\nTrendRadar 发现新版本 {update_info['remote_version']},当前 {update_info['current_version']}"
elif format_type == "ntfy":
base_footer = f"\n\n> 更新时间:{now.strftime('%Y-%m-%d %H:%M:%S')}"
if update_info:
base_footer += f"\n> TrendRadar 发现新版本 **{update_info['remote_version']}**,当前 **{update_info['current_version']}**"
elif format_type == "feishu":
base_footer = f"\n\n<font color='grey'>更新时间:{now.strftime('%Y-%m-%d %H:%M:%S')}</font>"
if update_info:
base_footer += f"\n<font color='grey'>TrendRadar 发现新版本 {update_info['remote_version']},当前 {update_info['current_version']}</font>"
elif format_type == "dingtalk":
base_footer = f"\n\n> 更新时间:{now.strftime('%Y-%m-%d %H:%M:%S')}"
if update_info:
base_footer += f"\n> TrendRadar 发现新版本 **{update_info['remote_version']}**,当前 **{update_info['current_version']}**"
stats_header = ""
if report_data["stats"]:
if format_type == "wework":
stats_header = f"📊 **热点词汇统计**\n\n"
elif format_type == "telegram":
stats_header = f"📊 热点词汇统计\n\n"
elif format_type == "ntfy":
stats_header = f"📊 **热点词汇统计**\n\n"
elif format_type == "feishu":
stats_header = f"📊 **热点词汇统计**\n\n"
elif format_type == "dingtalk":
stats_header = f"📊 **热点词汇统计**\n\n"
current_batch = base_header
current_batch_has_content = False
if (
not report_data["stats"]
and not report_data["new_titles"]
and not report_data["failed_ids"]
):
if mode == "incremental":
mode_text = "增量模式下暂无新增匹配的热点词汇"
elif mode == "current":
mode_text = "当前榜单模式下暂无匹配的热点词汇"
else:
mode_text = "暂无匹配的热点词汇"
simple_content = f"📭 {mode_text}\n\n"
final_content = base_header + simple_content + base_footer
batches.append(final_content)
return batches
    # Hot-word statistics
if report_data["stats"]:
total_count = len(report_data["stats"])
        # Add the statistics header
test_content = current_batch + stats_header
if (
len(test_content.encode("utf-8")) + len(base_footer.encode("utf-8"))
< max_bytes
):
current_batch = test_content
current_batch_has_content = True
else:
if current_batch_has_content:
batches.append(current_batch + base_footer)
current_batch = base_header + stats_header
current_batch_has_content = True
        # Process groups one by one (keep group header + first news item atomic)
for i, stat in enumerate(report_data["stats"]):
word = stat["word"]
count = stat["count"]
sequence_display = f"[{i + 1}/{total_count}]"
            # Build the group header
word_header = ""
if format_type == "wework":
if count >= 10:
word_header = (
f"🔥 {sequence_display} **{word}** : **{count}** 条\n\n"
)
elif count >= 5:
word_header = (
f"📈 {sequence_display} **{word}** : **{count}** 条\n\n"
)
else:
word_header = f"📌 {sequence_display} **{word}** : {count} 条\n\n"
elif format_type == "telegram":
if count >= 10:
word_header = f"🔥 {sequence_display} {word} : {count} 条\n\n"
elif count >= 5:
word_header = f"📈 {sequence_display} {word} : {count} 条\n\n"
else:
word_header = f"📌 {sequence_display} {word} : {count} 条\n\n"
elif format_type == "ntfy":
if count >= 10:
word_header = (
f"🔥 {sequence_display} **{word}** : **{count}** 条\n\n"
)
elif count >= 5:
word_header = (
f"📈 {sequence_display} **{word}** : **{count}** 条\n\n"
)
else:
word_header = f"📌 {sequence_display} **{word}** : {count} 条\n\n"
elif format_type == "feishu":
if count >= 10:
word_header = f"🔥 <font color='grey'>{sequence_display}</font> **{word}** : <font color='red'>{count}</font> 条\n\n"
elif count >= 5:
word_header = f"📈 <font color='grey'>{sequence_display}</font> **{word}** : <font color='orange'>{count}</font> 条\n\n"
else:
word_header = f"📌 <font color='grey'>{sequence_display}</font> **{word}** : {count} 条\n\n"
elif format_type == "dingtalk":
if count >= 10:
word_header = (
f"🔥 {sequence_display} **{word}** : **{count}** 条\n\n"
)
elif count >= 5:
word_header = (
f"📈 {sequence_display} **{word}** : **{count}** 条\n\n"
)
else:
word_header = f"📌 {sequence_display} **{word}** : {count} 条\n\n"
            # Build the first news line
first_news_line = ""
if stat["titles"]:
first_title_data = stat["titles"][0]
if format_type == "wework":
formatted_title = format_title_for_platform(
"wework", first_title_data, show_source=True
)
elif format_type == "telegram":
formatted_title = format_title_for_platform(
"telegram", first_title_data, show_source=True
)
elif format_type == "ntfy":
formatted_title = format_title_for_platform(
"ntfy", first_title_data, show_source=True
)
elif format_type == "feishu":
formatted_title = format_title_for_platform(
"feishu", first_title_data, show_source=True
)
elif format_type == "dingtalk":
formatted_title = format_title_for_platform(
"dingtalk", first_title_data, show_source=True
)
else:
formatted_title = f"{first_title_data['title']}"
first_news_line = f" 1. {formatted_title}\n"
if len(stat["titles"]) > 1:
first_news_line += "\n"
            # Atomicity check: the group header and first news item must stay together
word_with_first_news = word_header + first_news_line
test_content = current_batch + word_with_first_news
if (
len(test_content.encode("utf-8")) + len(base_footer.encode("utf-8"))
>= max_bytes
):
                # Current batch can't fit it; start a new one
if current_batch_has_content:
batches.append(current_batch + base_footer)
current_batch = base_header + stats_header + word_with_first_news
current_batch_has_content = True
start_index = 1
else:
current_batch = test_content
current_batch_has_content = True
start_index = 1
            # Process the remaining news items
for j in range(start_index, len(stat["titles"])):
title_data = stat["titles"][j]
if format_type == "wework":
formatted_title = format_title_for_platform(
"wework", title_data, show_source=True
)
elif format_type == "telegram":
formatted_title = format_title_for_platform(
"telegram", title_data, show_source=True
)
elif format_type == "ntfy":
formatted_title = format_title_for_platform(
"ntfy", title_data, show_source=True
)
elif format_type == "feishu":
formatted_title = format_title_for_platform(
"feishu", title_data, show_source=True
)
elif format_type == "dingtalk":
formatted_title = format_title_for_platform(
"dingtalk", title_data, show_source=True
)
else:
formatted_title = f"{title_data['title']}"
news_line = f" {j + 1}. {formatted_title}\n"
if j < len(stat["titles"]) - 1:
news_line += "\n"
test_content = current_batch + news_line
if (
len(test_content.encode("utf-8")) + len(base_footer.encode("utf-8"))
>= max_bytes
):
if current_batch_has_content:
batches.append(current_batch + base_footer)
current_batch = base_header + stats_header + word_header + news_line
current_batch_has_content = True
else:
current_batch = test_content
current_batch_has_content = True
            # Separator between groups
if i < len(report_data["stats"]) - 1:
separator = ""
if format_type == "wework":
separator = f"\n\n\n\n"
elif format_type == "telegram":
separator = f"\n\n"
elif format_type == "ntfy":
separator = f"\n\n"
elif format_type == "feishu":
separator = f"\n{CONFIG['FEISHU_MESSAGE_SEPARATOR']}\n\n"
elif format_type == "dingtalk":
separator = f"\n---\n\n"
test_content = current_batch + separator
if (
len(test_content.encode("utf-8")) + len(base_footer.encode("utf-8"))
< max_bytes
):
current_batch = test_content
    # New items (again keep source header + first item atomic)
if report_data["new_titles"]:
new_header = ""
if format_type == "wework":
new_header = f"\n\n\n\n🆕 **本次新增热点新闻** (共 {report_data['total_new_count']} 条)\n\n"
elif format_type == "telegram":
new_header = (
f"\n\n🆕 本次新增热点新闻 (共 {report_data['total_new_count']} 条)\n\n"
)
elif format_type == "ntfy":
new_header = f"\n\n🆕 **本次新增热点新闻** (共 {report_data['total_new_count']} 条)\n\n"
elif format_type == "feishu":
new_header = f"\n{CONFIG['FEISHU_MESSAGE_SEPARATOR']}\n\n🆕 **本次新增热点新闻** (共 {report_data['total_new_count']} 条)\n\n"
elif format_type == "dingtalk":
new_header = f"\n---\n\n🆕 **本次新增热点新闻** (共 {report_data['total_new_count']} 条)\n\n"
test_content = current_batch + new_header
if (
len(test_content.encode("utf-8")) + len(base_footer.encode("utf-8"))
>= max_bytes
):
if current_batch_has_content:
batches.append(current_batch + base_footer)
current_batch = base_header + new_header
current_batch_has_content = True
else:
current_batch = test_content
current_batch_has_content = True
        # Process new-item sources one by one
for source_data in report_data["new_titles"]:
source_header = ""
if format_type == "wework":
source_header = f"**{source_data['source_name']}** ({len(source_data['titles'])} 条):\n\n"
elif format_type == "telegram":
source_header = f"{source_data['source_name']} ({len(source_data['titles'])} 条):\n\n"
elif format_type == "ntfy":
source_header = f"**{source_data['source_name']}** ({len(source_data['titles'])} 条):\n\n"
elif format_type == "feishu":
source_header = f"**{source_data['source_name']}** ({len(source_data['titles'])} 条):\n\n"
elif format_type == "dingtalk":
source_header = f"**{source_data['source_name']}** ({len(source_data['titles'])} 条):\n\n"
            # Build the first new-item line
first_news_line = ""
if source_data["titles"]:
first_title_data = source_data["titles"][0]
title_data_copy = first_title_data.copy()
title_data_copy["is_new"] = False
if format_type == "wework":
formatted_title = format_title_for_platform(
"wework", title_data_copy, show_source=False
)
elif format_type == "telegram":
formatted_title = format_title_for_platform(
"telegram", title_data_copy, show_source=False
)
elif format_type == "feishu":
formatted_title = format_title_for_platform(
"feishu", title_data_copy, show_source=False
)
elif format_type == "dingtalk":
formatted_title = format_title_for_platform(
"dingtalk", title_data_copy, show_source=False
)
else:
formatted_title = f"{title_data_copy['title']}"
first_news_line = f" 1. {formatted_title}\n"
            # Atomicity check: source header + first news item
source_with_first_news = source_header + first_news_line
test_content = current_batch + source_with_first_news
if (
len(test_content.encode("utf-8")) + len(base_footer.encode("utf-8"))
>= max_bytes
):
if current_batch_has_content:
batches.append(current_batch + base_footer)
current_batch = base_header + new_header + source_with_first_news
current_batch_has_content = True
start_index = 1
else:
current_batch = test_content
current_batch_has_content = True
start_index = 1
            # Process the remaining new items
for j in range(start_index, len(source_data["titles"])):
title_data = source_data["titles"][j]
title_data_copy = title_data.copy()
title_data_copy["is_new"] = False
if format_type == "wework":
formatted_title = format_title_for_platform(
"wework", title_data_copy, show_source=False
)
elif format_type == "telegram":
formatted_title = format_title_for_platform(
"telegram", title_data_copy, show_source=False
)
elif format_type == "feishu":
formatted_title = format_title_for_platform(
"feishu", title_data_copy, show_source=False
)
elif format_type == "dingtalk":
formatted_title = format_title_for_platform(
"dingtalk", title_data_copy, show_source=False
)
else:
formatted_title = f"{title_data_copy['title']}"
news_line = f" {j + 1}. {formatted_title}\n"
test_content = current_batch + news_line
if (
len(test_content.encode("utf-8")) + len(base_footer.encode("utf-8"))
>= max_bytes
):
if current_batch_has_content:
batches.append(current_batch + base_footer)
current_batch = base_header + new_header + source_header + news_line
current_batch_has_content = True
else:
current_batch = test_content
current_batch_has_content = True
current_batch += "\n"
if report_data["failed_ids"]:
failed_header = ""
if format_type == "wework":
failed_header = f"\n\n\n\n⚠️ **数据获取失败的平台:**\n\n"
elif format_type == "telegram":
failed_header = f"\n\n⚠️ 数据获取失败的平台:\n\n"
elif format_type == "ntfy":
failed_header = f"\n\n⚠️ **数据获取失败的平台:**\n\n"
elif format_type == "feishu":
failed_header = f"\n{CONFIG['FEISHU_MESSAGE_SEPARATOR']}\n\n⚠️ **数据获取失败的平台:**\n\n"
elif format_type == "dingtalk":
failed_header = f"\n---\n\n⚠️ **数据获取失败的平台:**\n\n"
test_content = current_batch + failed_header
if (
len(test_content.encode("utf-8")) + len(base_footer.encode("utf-8"))
>= max_bytes
):
if current_batch_has_content:
batches.append(current_batch + base_footer)
current_batch = base_header + failed_header
current_batch_has_content = True
else:
current_batch = test_content
current_batch_has_content = True
for i, id_value in enumerate(report_data["failed_ids"], 1):
if format_type == "feishu":
failed_line = f" • <font color='red'>{id_value}</font>\n"
elif format_type == "dingtalk":
failed_line = f" • **{id_value}**\n"
else:
failed_line = f" • {id_value}\n"
test_content = current_batch + failed_line
if (
len(test_content.encode("utf-8")) + len(base_footer.encode("utf-8"))
>= max_bytes
):
if current_batch_has_content:
batches.append(current_batch + base_footer)
current_batch = base_header + failed_header + failed_line
current_batch_has_content = True
else:
current_batch = test_content
current_batch_has_content = True
# 完成最后批次
if current_batch_has_content:
batches.append(current_batch + base_footer)
return batches
def send_to_notifications(
stats: List[Dict],
failed_ids: Optional[List] = None,
report_type: str = "当日汇总",
new_titles: Optional[Dict] = None,
id_to_name: Optional[Dict] = None,
update_info: Optional[Dict] = None,
proxy_url: Optional[str] = None,
mode: str = "daily",
html_file_path: Optional[str] = None,
) -> Dict[str, bool]:
"""发送数据到多个通知平台"""
results = {}
if CONFIG["PUSH_WINDOW"]["ENABLED"]:
push_manager = PushRecordManager()
time_range_start = CONFIG["PUSH_WINDOW"]["TIME_RANGE"]["START"]
time_range_end = CONFIG["PUSH_WINDOW"]["TIME_RANGE"]["END"]
if not push_manager.is_in_time_range(time_range_start, time_range_end):
now = get_beijing_time()
print(
f"推送窗口控制:当前时间 {now.strftime('%H:%M')} 不在推送时间窗口 {time_range_start}-{time_range_end} 内,跳过推送"
)
return results
if CONFIG["PUSH_WINDOW"]["ONCE_PER_DAY"]:
if push_manager.has_pushed_today():
print(f"推送窗口控制:今天已推送过,跳过本次推送")
return results
else:
print(f"推送窗口控制:今天首次推送")
report_data = prepare_report_data(stats, failed_ids, new_titles, id_to_name, mode)
feishu_url = CONFIG["FEISHU_WEBHOOK_URL"]
dingtalk_url = CONFIG["DINGTALK_WEBHOOK_URL"]
wework_url = CONFIG["WEWORK_WEBHOOK_URL"]
telegram_token = CONFIG["TELEGRAM_BOT_TOKEN"]
telegram_chat_id = CONFIG["TELEGRAM_CHAT_ID"]
email_from = CONFIG["EMAIL_FROM"]
email_password = CONFIG["EMAIL_PASSWORD"]
email_to = CONFIG["EMAIL_TO"]
email_smtp_server = CONFIG.get("EMAIL_SMTP_SERVER", "")
email_smtp_port = CONFIG.get("EMAIL_SMTP_PORT", "")
ntfy_server_url = CONFIG["NTFY_SERVER_URL"]
ntfy_topic = CONFIG["NTFY_TOPIC"]
ntfy_token = CONFIG.get("NTFY_TOKEN", "")
bark_url = CONFIG["BARK_URL"]
update_info_to_send = update_info if CONFIG["SHOW_VERSION_UPDATE"] else None
# 发送到飞书
if feishu_url:
results["feishu"] = send_to_feishu(
feishu_url, report_data, report_type, update_info_to_send, proxy_url, mode
)
# 发送到钉钉
if dingtalk_url:
results["dingtalk"] = send_to_dingtalk(
dingtalk_url, report_data, report_type, update_info_to_send, proxy_url, mode
)
# 发送到企业微信
if wework_url:
results["wework"] = send_to_wework(
wework_url, report_data, report_type, update_info_to_send, proxy_url, mode
)
# 发送到 Telegram
if telegram_token and telegram_chat_id:
results["telegram"] = send_to_telegram(
telegram_token,
telegram_chat_id,
report_data,
report_type,
update_info_to_send,
proxy_url,
mode,
)
# 发送到 ntfy
if ntfy_server_url and ntfy_topic:
results["ntfy"] = send_to_ntfy(
ntfy_server_url,
ntfy_topic,
ntfy_token,
report_data,
report_type,
update_info_to_send,
proxy_url,
mode,
)
# 发送到 Bark
if bark_url:
results["bark"] = send_to_bark(
bark_url,
report_data,
report_type,
update_info_to_send,
proxy_url,
mode,
)
# 发送邮件
if email_from and email_password and email_to:
results["email"] = send_to_email(
email_from,
email_password,
email_to,
report_type,
html_file_path,
email_smtp_server,
email_smtp_port,
)
if not results:
print("未配置任何通知渠道,跳过通知发送")
# 如果成功发送了任何通知,且启用了每天只推一次,则记录推送
if (
CONFIG["PUSH_WINDOW"]["ENABLED"]
and CONFIG["PUSH_WINDOW"]["ONCE_PER_DAY"]
and any(results.values())
):
push_manager = PushRecordManager()
push_manager.record_push(report_type)
return results
def send_to_feishu(
webhook_url: str,
report_data: Dict,
report_type: str,
update_info: Optional[Dict] = None,
proxy_url: Optional[str] = None,
mode: str = "daily",
) -> bool:
"""发送到飞书(支持分批发送)"""
headers = {"Content-Type": "application/json"}
proxies = None
if proxy_url:
proxies = {"http": proxy_url, "https": proxy_url}
# 获取分批内容,使用飞书专用的批次大小
batches = split_content_into_batches(
report_data,
"feishu",
update_info,
max_bytes=CONFIG.get("FEISHU_BATCH_SIZE", 29000),
mode=mode,
)
print(f"飞书消息分为 {len(batches)} 批次发送 [{report_type}]")
# 逐批发送
for i, batch_content in enumerate(batches, 1):
batch_size = len(batch_content.encode("utf-8"))
print(
f"发送飞书第 {i}/{len(batches)} 批次,大小:{batch_size} 字节 [{report_type}]"
)
# 添加批次标识
if len(batches) > 1:
batch_header = f"**[第 {i}/{len(batches)} 批次]**\n\n"
# 将批次标识插入到适当位置(在统计标题之后)
if "📊 **热点词汇统计**" in batch_content:
batch_content = batch_content.replace(
"📊 **热点词汇统计**\n\n", f"📊 **热点词汇统计** {batch_header}"
)
else:
# 如果没有统计标题,直接在开头添加
batch_content = batch_header + batch_content
total_titles = sum(
len(stat["titles"]) for stat in report_data["stats"] if stat["count"] > 0
)
now = get_beijing_time()
payload = {
"msg_type": "text",
"content": {
"total_titles": total_titles,
"timestamp": now.strftime("%Y-%m-%d %H:%M:%S"),
"report_type": report_type,
"text": batch_content,
},
}
try:
response = requests.post(
webhook_url, headers=headers, json=payload, proxies=proxies, timeout=30
)
if response.status_code == 200:
result = response.json()
# 检查飞书的响应状态
if result.get("StatusCode") == 0 or result.get("code") == 0:
print(f"飞书第 {i}/{len(batches)} 批次发送成功 [{report_type}]")
# 批次间间隔
if i < len(batches):
time.sleep(CONFIG["BATCH_SEND_INTERVAL"])
else:
error_msg = result.get("msg") or result.get("StatusMessage", "未知错误")
print(
f"飞书第 {i}/{len(batches)} 批次发送失败 [{report_type}],错误:{error_msg}"
)
return False
else:
print(
f"飞书第 {i}/{len(batches)} 批次发送失败 [{report_type}],状态码:{response.status_code}"
)
return False
except Exception as e:
print(f"飞书第 {i}/{len(batches)} 批次发送出错 [{report_type}]:{e}")
return False
print(f"飞书所有 {len(batches)} 批次发送完成 [{report_type}]")
return True
def send_to_dingtalk(
webhook_url: str,
report_data: Dict,
report_type: str,
update_info: Optional[Dict] = None,
proxy_url: Optional[str] = None,
mode: str = "daily",
) -> bool:
"""发送到钉钉(支持分批发送)"""
headers = {"Content-Type": "application/json"}
proxies = None
if proxy_url:
proxies = {"http": proxy_url, "https": proxy_url}
# 获取分批内容,使用钉钉专用的批次大小
batches = split_content_into_batches(
report_data,
"dingtalk",
update_info,
max_bytes=CONFIG.get("DINGTALK_BATCH_SIZE", 20000),
mode=mode,
)
print(f"钉钉消息分为 {len(batches)} 批次发送 [{report_type}]")
# 逐批发送
for i, batch_content in enumerate(batches, 1):
batch_size = len(batch_content.encode("utf-8"))
print(
f"发送钉钉第 {i}/{len(batches)} 批次,大小:{batch_size} 字节 [{report_type}]"
)
# 添加批次标识
if len(batches) > 1:
batch_header = f"**[第 {i}/{len(batches)} 批次]**\n\n"
# 将批次标识插入到适当位置(在标题之后)
if "📊 **热点词汇统计**" in batch_content:
batch_content = batch_content.replace(
"📊 **热点词汇统计**\n\n", f"📊 **热点词汇统计** {batch_header}\n\n"
)
else:
# 如果没有统计标题,直接在开头添加
batch_content = batch_header + batch_content
payload = {
"msgtype": "markdown",
"markdown": {
"title": f"TrendRadar 热点分析报告 - {report_type}",
"text": batch_content,
},
}
try:
response = requests.post(
webhook_url, headers=headers, json=payload, proxies=proxies, timeout=30
)
if response.status_code == 200:
result = response.json()
if result.get("errcode") == 0:
print(f"钉钉第 {i}/{len(batches)} 批次发送成功 [{report_type}]")
# 批次间间隔
if i < len(batches):
time.sleep(CONFIG["BATCH_SEND_INTERVAL"])
else:
print(
f"钉钉第 {i}/{len(batches)} 批次发送失败 [{report_type}],错误:{result.get('errmsg')}"
)
return False
else:
print(
f"钉钉第 {i}/{len(batches)} 批次发送失败 [{report_type}],状态码:{response.status_code}"
)
return False
except Exception as e:
print(f"钉钉第 {i}/{len(batches)} 批次发送出错 [{report_type}]:{e}")
return False
print(f"钉钉所有 {len(batches)} 批次发送完成 [{report_type}]")
return True
def strip_markdown(text: str) -> str:
"""去除文本中的 markdown 语法格式,用于个人微信推送"""
# 去除粗体 **text** 或 __text__
text = re.sub(r'\*\*(.+?)\*\*', r'\1', text)
text = re.sub(r'__(.+?)__', r'\1', text)
# 去除斜体 *text* 或 _text_
text = re.sub(r'\*(.+?)\*', r'\1', text)
text = re.sub(r'_(.+?)_', r'\1', text)
# 去除删除线 ~~text~~
text = re.sub(r'~~(.+?)~~', r'\1', text)
# 转换链接 [text](url) -> text url(保留 URL)
text = re.sub(r'\[([^\]]+)\]\(([^)]+)\)', r'\1 \2', text)
# 如果不需要保留 URL,可以使用下面这行(只保留标题文本):
# text = re.sub(r'\[([^\]]+)\]\([^)]+\)', r'\1', text)
# 去除图片  -> alt
text = re.sub(r'!\[(.+?)\]\(.+?\)', r'\1', text)
# 去除行内代码 `code`
text = re.sub(r'`(.+?)`', r'\1', text)
# 去除引用符号 >
text = re.sub(r'^>\s*', '', text, flags=re.MULTILINE)
# 去除标题符号 # ## ### 等
text = re.sub(r'^#+\s*', '', text, flags=re.MULTILINE)
# 去除水平分割线 --- 或 ***
text = re.sub(r'^[\-\*]{3,}\s*$', '', text, flags=re.MULTILINE)
# 去除 HTML 标签 <font color='xxx'>text</font> -> text
text = re.sub(r'<font[^>]*>(.+?)</font>', r'\1', text)
text = re.sub(r'<[^>]+>', '', text)
# 清理多余的空行(保留最多两个连续空行)
text = re.sub(r'\n{3,}', '\n\n', text)
return text.strip()
def send_to_wework(
webhook_url: str,
report_data: Dict,
report_type: str,
update_info: Optional[Dict] = None,
proxy_url: Optional[str] = None,
mode: str = "daily",
) -> bool:
"""发送到企业微信(支持分批发送,支持 markdown 和 text 两种格式)"""
headers = {"Content-Type": "application/json"}
proxies = None
if proxy_url:
proxies = {"http": proxy_url, "https": proxy_url}
# 获取消息类型配置(markdown 或 text)
msg_type = CONFIG.get("WEWORK_MSG_TYPE", "markdown").lower()
is_text_mode = msg_type == "text"
if is_text_mode:
print(f"企业微信使用 text 格式(个人微信模式)[{report_type}]")
else:
print(f"企业微信使用 markdown 格式(群机器人模式)[{report_type}]")
# 获取分批内容
batches = split_content_into_batches(report_data, "wework", update_info, mode=mode)
print(f"企业微信消息分为 {len(batches)} 批次发送 [{report_type}]")
# 逐批发送
for i, batch_content in enumerate(batches, 1):
# 添加批次标识
if len(batches) > 1:
if is_text_mode:
batch_header = f"[第 {i}/{len(batches)} 批次]\n\n"
else:
batch_header = f"**[第 {i}/{len(batches)} 批次]**\n\n"
batch_content = batch_header + batch_content
# 根据消息类型构建 payload
if is_text_mode:
# text 格式:去除 markdown 语法
plain_content = strip_markdown(batch_content)
payload = {"msgtype": "text", "text": {"content": plain_content}}
batch_size = len(plain_content.encode("utf-8"))
else:
# markdown 格式:保持原样
payload = {"msgtype": "markdown", "markdown": {"content": batch_content}}
batch_size = len(batch_content.encode("utf-8"))
print(
f"发送企业微信第 {i}/{len(batches)} 批次,大小:{batch_size} 字节 [{report_type}]"
)
try:
response = requests.post(
webhook_url, headers=headers, json=payload, proxies=proxies, timeout=30
)
if response.status_code == 200:
result = response.json()
if result.get("errcode") == 0:
print(f"企业微信第 {i}/{len(batches)} 批次发送成功 [{report_type}]")
# 批次间间隔
if i < len(batches):
time.sleep(CONFIG["BATCH_SEND_INTERVAL"])
else:
print(
f"企业微信第 {i}/{len(batches)} 批次发送失败 [{report_type}],错误:{result.get('errmsg')}"
)
return False
else:
print(
f"企业微信第 {i}/{len(batches)} 批次发送失败 [{report_type}],状态码:{response.status_code}"
)
return False
except Exception as e:
print(f"企业微信第 {i}/{len(batches)} 批次发送出错 [{report_type}]:{e}")
return False
print(f"企业微信所有 {len(batches)} 批次发送完成 [{report_type}]")
return True
def send_to_telegram(
bot_token: str,
chat_id: str,
report_data: Dict,
report_type: str,
update_info: Optional[Dict] = None,
proxy_url: Optional[str] = None,
mode: str = "daily",
) -> bool:
"""发送到Telegram(支持分批发送)"""
headers = {"Content-Type": "application/json"}
url = f"https://api.telegram.org/bot{bot_token}/sendMessage"
proxies = None
if proxy_url:
proxies = {"http": proxy_url, "https": proxy_url}
# 获取分批内容
batches = split_content_into_batches(
report_data, "telegram", update_info, mode=mode
)
print(f"Telegram消息分为 {len(batches)} 批次发送 [{report_type}]")
# 逐批发送
for i, batch_content in enumerate(batches, 1):
batch_size = len(batch_content.encode("utf-8"))
print(
f"发送Telegram第 {i}/{len(batches)} 批次,大小:{batch_size} 字节 [{report_type}]"
)
# 添加批次标识
if len(batches) > 1:
batch_header = f"<b>[第 {i}/{len(batches)} 批次]</b>\n\n"
batch_content = batch_header + batch_content
payload = {
"chat_id": chat_id,
"text": batch_content,
"parse_mode": "HTML",
"disable_web_page_preview": True,
}
try:
response = requests.post(
url, headers=headers, json=payload, proxies=proxies, timeout=30
)
if response.status_code == 200:
result = response.json()
if result.get("ok"):
print(f"Telegram第 {i}/{len(batches)} 批次发送成功 [{report_type}]")
# 批次间间隔
if i < len(batches):
time.sleep(CONFIG["BATCH_SEND_INTERVAL"])
else:
print(
f"Telegram第 {i}/{len(batches)} 批次发送失败 [{report_type}],错误:{result.get('description')}"
)
return False
else:
print(
f"Telegram第 {i}/{len(batches)} 批次发送失败 [{report_type}],状态码:{response.status_code}"
)
return False
except Exception as e:
print(f"Telegram第 {i}/{len(batches)} 批次发送出错 [{report_type}]:{e}")
return False
print(f"Telegram所有 {len(batches)} 批次发送完成 [{report_type}]")
return True
def send_to_email(
from_email: str,
password: str,
to_email: str,
report_type: str,
html_file_path: str,
custom_smtp_server: Optional[str] = None,
custom_smtp_port: Optional[int] = None,
) -> bool:
"""发送邮件通知"""
try:
if not html_file_path or not Path(html_file_path).exists():
print(f"错误:HTML文件不存在或未提供: {html_file_path}")
return False
print(f"使用HTML文件: {html_file_path}")
with open(html_file_path, "r", encoding="utf-8") as f:
html_content = f.read()
domain = from_email.split("@")[-1].lower()
if custom_smtp_server and custom_smtp_port:
# 使用自定义 SMTP 配置
smtp_server = custom_smtp_server
smtp_port = int(custom_smtp_port)
# 根据端口判断加密方式:465=SSL, 587=TLS
if smtp_port == 465:
use_tls = False # SSL 模式(SMTP_SSL)
elif smtp_port == 587:
use_tls = True # TLS 模式(STARTTLS)
else:
# 其他端口优先尝试 TLS(更安全,更广泛支持)
use_tls = True
elif domain in SMTP_CONFIGS:
# 使用预设配置
config = SMTP_CONFIGS[domain]
smtp_server = config["server"]
smtp_port = config["port"]
use_tls = config["encryption"] == "TLS"
else:
print(f"未识别的邮箱服务商: {domain},使用通用 SMTP 配置")
smtp_server = f"smtp.{domain}"
smtp_port = 587
use_tls = True
msg = MIMEMultipart("alternative")
# 严格按照 RFC 标准设置 From header
sender_name = "TrendRadar"
msg["From"] = formataddr((sender_name, from_email))
# 设置收件人
recipients = [addr.strip() for addr in to_email.split(",")]
if len(recipients) == 1:
msg["To"] = recipients[0]
else:
msg["To"] = ", ".join(recipients)
# 设置邮件主题
now = get_beijing_time()
subject = f"TrendRadar 热点分析报告 - {report_type} - {now.strftime('%m月%d日 %H:%M')}"
msg["Subject"] = Header(subject, "utf-8")
# 设置其他标准 header
msg["MIME-Version"] = "1.0"
msg["Date"] = formatdate(localtime=True)
msg["Message-ID"] = make_msgid()
# 添加纯文本部分(作为备选)
text_content = f"""
TrendRadar 热点分析报告
========================
报告类型:{report_type}
生成时间:{now.strftime('%Y-%m-%d %H:%M:%S')}
请使用支持HTML的邮件客户端查看完整报告内容。
"""
text_part = MIMEText(text_content, "plain", "utf-8")
msg.attach(text_part)
html_part = MIMEText(html_content, "html", "utf-8")
msg.attach(html_part)
print(f"正在发送邮件到 {to_email}...")
print(f"SMTP 服务器: {smtp_server}:{smtp_port}")
print(f"发件人: {from_email}")
try:
if use_tls:
# TLS 模式
server = smtplib.SMTP(smtp_server, smtp_port, timeout=30)
server.set_debuglevel(0) # 设为1可以查看详细调试信息
server.ehlo()
server.starttls()
server.ehlo()
else:
# SSL 模式
server = smtplib.SMTP_SSL(smtp_server, smtp_port, timeout=30)
server.set_debuglevel(0)
server.ehlo()
# 登录
server.login(from_email, password)
# 发送邮件
server.send_message(msg)
server.quit()
print(f"邮件发送成功 [{report_type}] -> {to_email}")
return True
except smtplib.SMTPServerDisconnected:
print(f"邮件发送失败:服务器意外断开连接,请检查网络或稍后重试")
return False
except smtplib.SMTPAuthenticationError as e:
print(f"邮件发送失败:认证错误,请检查邮箱和密码/授权码")
print(f"详细错误: {str(e)}")
return False
except smtplib.SMTPRecipientsRefused as e:
print(f"邮件发送失败:收件人地址被拒绝 {e}")
return False
except smtplib.SMTPSenderRefused as e:
print(f"邮件发送失败:发件人地址被拒绝 {e}")
return False
except smtplib.SMTPDataError as e:
print(f"邮件发送失败:邮件数据错误 {e}")
return False
except smtplib.SMTPConnectError as e:
print(f"邮件发送失败:无法连接到 SMTP 服务器 {smtp_server}:{smtp_port}")
print(f"详细错误: {str(e)}")
return False
except Exception as e:
print(f"邮件发送失败 [{report_type}]:{e}")
import traceback
traceback.print_exc()
return False
def send_to_ntfy(
server_url: str,
topic: str,
token: Optional[str],
report_data: Dict,
report_type: str,
update_info: Optional[Dict] = None,
proxy_url: Optional[str] = None,
mode: str = "daily",
) -> bool:
"""发送到ntfy(支持分批发送,严格遵守4KB限制)"""
# 避免 HTTP header 编码问题
report_type_en_map = {
"当日汇总": "Daily Summary",
"当前榜单汇总": "Current Ranking",
"增量更新": "Incremental Update",
"实时增量": "Realtime Incremental",
"实时当前榜单": "Realtime Current Ranking",
}
report_type_en = report_type_en_map.get(report_type, "News Report")
headers = {
"Content-Type": "text/plain; charset=utf-8",
"Markdown": "yes",
"Title": report_type_en,
"Priority": "default",
"Tags": "news",
}
if token:
headers["Authorization"] = f"Bearer {token}"
# 构建完整URL,确保格式正确
base_url = server_url.rstrip("/")
if not base_url.startswith(("http://", "https://")):
base_url = f"https://{base_url}"
url = f"{base_url}/{topic}"
proxies = None
if proxy_url:
proxies = {"http": proxy_url, "https": proxy_url}
# 获取分批内容,使用ntfy专用的4KB限制
batches = split_content_into_batches(
report_data, "ntfy", update_info, max_bytes=3800, mode=mode
)
total_batches = len(batches)
print(f"ntfy消息分为 {total_batches} 批次发送 [{report_type}]")
# 反转批次顺序,使得在ntfy客户端显示时顺序正确
# ntfy显示最新消息在上面,所以我们从最后一批开始推送
reversed_batches = list(reversed(batches))
print(f"ntfy将按反向顺序推送(最后批次先推送),确保客户端显示顺序正确")
# 逐批发送(反向顺序)
success_count = 0
for idx, batch_content in enumerate(reversed_batches, 1):
# 计算正确的批次编号(用户视角的编号)
actual_batch_num = total_batches - idx + 1
batch_size = len(batch_content.encode("utf-8"))
print(
f"发送ntfy第 {actual_batch_num}/{total_batches} 批次(推送顺序: {idx}/{total_batches}),大小:{batch_size} 字节 [{report_type}]"
)
# 检查消息大小,确保不超过4KB
if batch_size > 4096:
print(f"警告:ntfy第 {actual_batch_num} 批次消息过大({batch_size} 字节),可能被拒绝")
# 添加批次标识(使用正确的批次编号)
current_headers = headers.copy()
if total_batches > 1:
batch_header = f"**[第 {actual_batch_num}/{total_batches} 批次]**\n\n"
batch_content = batch_header + batch_content
current_headers["Title"] = (
f"{report_type_en} ({actual_batch_num}/{total_batches})"
)
try:
response = requests.post(
url,
headers=current_headers,
data=batch_content.encode("utf-8"),
proxies=proxies,
timeout=30,
)
if response.status_code == 200:
print(f"ntfy第 {actual_batch_num}/{total_batches} 批次发送成功 [{report_type}]")
success_count += 1
if idx < total_batches:
# 公共服务器建议 2-3 秒,自托管可以更短
interval = 2 if "ntfy.sh" in server_url else 1
time.sleep(interval)
elif response.status_code == 429:
print(
f"ntfy第 {actual_batch_num}/{total_batches} 批次速率限制 [{report_type}],等待后重试"
)
time.sleep(10) # 等待10秒后重试
# 重试一次
retry_response = requests.post(
url,
headers=current_headers,
data=batch_content.encode("utf-8"),
proxies=proxies,
timeout=30,
)
if retry_response.status_code == 200:
print(f"ntfy第 {actual_batch_num}/{total_batches} 批次重试成功 [{report_type}]")
success_count += 1
else:
print(
f"ntfy第 {actual_batch_num}/{total_batches} 批次重试失败,状态码:{retry_response.status_code}"
)
elif response.status_code == 413:
print(
f"ntfy第 {actual_batch_num}/{total_batches} 批次消息过大被拒绝 [{report_type}],消息大小:{batch_size} 字节"
)
else:
print(
f"ntfy第 {actual_batch_num}/{total_batches} 批次发送失败 [{report_type}],状态码:{response.status_code}"
)
try:
print(f"错误详情:{response.text}")
except:
pass
except requests.exceptions.ConnectTimeout:
print(f"ntfy第 {actual_batch_num}/{total_batches} 批次连接超时 [{report_type}]")
except requests.exceptions.ReadTimeout:
print(f"ntfy第 {actual_batch_num}/{total_batches} 批次读取超时 [{report_type}]")
except requests.exceptions.ConnectionError as e:
print(f"ntfy第 {actual_batch_num}/{total_batches} 批次连接错误 [{report_type}]:{e}")
except Exception as e:
print(f"ntfy第 {actual_batch_num}/{total_batches} 批次发送异常 [{report_type}]:{e}")
# 判断整体发送是否成功
if success_count == total_batches:
print(f"ntfy所有 {total_batches} 批次发送完成 [{report_type}]")
return True
elif success_count > 0:
print(f"ntfy部分发送成功:{success_count}/{total_batches} 批次 [{report_type}]")
return True # 部分成功也视为成功
else:
print(f"ntfy发送完全失败 [{report_type}]")
return False
def send_to_bark(
bark_url: str,
report_data: Dict,
report_type: str,
update_info: Optional[Dict] = None,
proxy_url: Optional[str] = None,
mode: str = "daily",
) -> bool:
"""发送到Bark(支持分批发送,使用纯文本格式)"""
proxies = None
if proxy_url:
proxies = {"http": proxy_url, "https": proxy_url}
# 获取分批内容(Bark 限制为 3600 字节以避免 413 错误)
batches = split_content_into_batches(
report_data, "wework", update_info, max_bytes=CONFIG["BARK_BATCH_SIZE"], mode=mode
)
total_batches = len(batches)
print(f"Bark消息分为 {total_batches} 批次发送 [{report_type}]")
# 反转批次顺序,使得在Bark客户端显示时顺序正确
# Bark显示最新消息在上面,所以我们从最后一批开始推送
reversed_batches = list(reversed(batches))
print(f"Bark将按反向顺序推送(最后批次先推送),确保客户端显示顺序正确")
# 逐批发送(反向顺序)
success_count = 0
for idx, batch_content in enumerate(reversed_batches, 1):
# 计算正确的批次编号(用户视角的编号)
actual_batch_num = total_batches - idx + 1
# 添加批次标识(使用正确的批次编号)
if total_batches > 1:
batch_header = f"[第 {actual_batch_num}/{total_batches} 批次]\n\n"
batch_content = batch_header + batch_content
# 清理 markdown 语法(Bark 不支持 markdown)
plain_content = strip_markdown(batch_content)
batch_size = len(plain_content.encode("utf-8"))
print(
f"发送Bark第 {actual_batch_num}/{total_batches} 批次(推送顺序: {idx}/{total_batches}),大小:{batch_size} 字节 [{report_type}]"
)
# 检查消息大小(Bark使用APNs,限制4KB)
if batch_size > 4096:
print(
f"警告:Bark第 {actual_batch_num}/{total_batches} 批次消息过大({batch_size} 字节),可能被拒绝"
)
# 构建JSON payload
payload = {
"title": report_type,
"body": plain_content,
"sound": "default",
"group": "TrendRadar",
}
try:
response = requests.post(
bark_url,
json=payload,
proxies=proxies,
timeout=30,
)
if response.status_code == 200:
result = response.json()
if result.get("code") == 200:
print(f"Bark第 {actual_batch_num}/{total_batches} 批次发送成功 [{report_type}]")
success_count += 1
# 批次间间隔
if idx < total_batches:
time.sleep(CONFIG["BATCH_SEND_INTERVAL"])
else:
print(
f"Bark第 {actual_batch_num}/{total_batches} 批次发送失败 [{report_type}],错误:{result.get('message', '未知错误')}"
)
else:
print(
f"Bark第 {actual_batch_num}/{total_batches} 批次发送失败 [{report_type}],状态码:{response.status_code}"
)
try:
print(f"错误详情:{response.text}")
except:
pass
except requests.exceptions.ConnectTimeout:
print(f"Bark第 {actual_batch_num}/{total_batches} 批次连接超时 [{report_type}]")
except requests.exceptions.ReadTimeout:
print(f"Bark第 {actual_batch_num}/{total_batches} 批次读取超时 [{report_type}]")
except requests.exceptions.ConnectionError as e:
print(f"Bark第 {actual_batch_num}/{total_batches} 批次连接错误 [{report_type}]:{e}")
except Exception as e:
print(f"Bark第 {actual_batch_num}/{total_batches} 批次发送异常 [{report_type}]:{e}")
# 判断整体发送是否成功
if success_count == total_batches:
print(f"Bark所有 {total_batches} 批次发送完成 [{report_type}]")
return True
elif success_count > 0:
print(f"Bark部分发送成功:{success_count}/{total_batches} 批次 [{report_type}]")
return True # 部分成功也视为成功
else:
print(f"Bark发送完全失败 [{report_type}]")
return False
# === 主分析器 ===
class NewsAnalyzer:
"""新闻分析器"""
# 模式策略定义
MODE_STRATEGIES = {
"incremental": {
"mode_name": "增量模式",
"description": "增量模式(只关注新增新闻,无新增时不推送)",
"realtime_report_type": "实时增量",
"summary_report_type": "当日汇总",
"should_send_realtime": True,
"should_generate_summary": True,
"summary_mode": "daily",
},
"current": {
"mode_name": "当前榜单模式",
"description": "当前榜单模式(当前榜单匹配新闻 + 新增新闻区域 + 按时推送)",
"realtime_report_type": "实时当前榜单",
"summary_report_type": "当前榜单汇总",
"should_send_realtime": True,
"should_generate_summary": True,
"summary_mode": "current",
},
"daily": {
"mode_name": "当日汇总模式",
"description": "当日汇总模式(所有匹配新闻 + 新增新闻区域 + 按时推送)",
"realtime_report_type": "",
"summary_report_type": "当日汇总",
"should_send_realtime": False,
"should_generate_summary": True,
"summary_mode": "daily",
},
}
def __init__(self):
self.request_interval = CONFIG["REQUEST_INTERVAL"]
self.report_mode = CONFIG["REPORT_MODE"]
self.rank_threshold = CONFIG["RANK_THRESHOLD"]
self.is_github_actions = os.environ.get("GITHUB_ACTIONS") == "true"
self.is_docker_container = self._detect_docker_environment()
self.update_info = None
self.proxy_url = None
self._setup_proxy()
self.data_fetcher = DataFetcher(self.proxy_url)
if self.is_github_actions:
self._check_version_update()
def _detect_docker_environment(self) -> bool:
"""检测是否运行在 Docker 容器中"""
try:
if os.environ.get("DOCKER_CONTAINER") == "true":
return True
if os.path.exists("/.dockerenv"):
return True
return False
except Exception:
return False
def _should_open_browser(self) -> bool:
"""判断是否应该打开浏览器"""
return not self.is_github_actions and not self.is_docker_container
def _setup_proxy(self) -> None:
"""设置代理配置"""
if not self.is_github_actions and CONFIG["USE_PROXY"]:
self.proxy_url = CONFIG["DEFAULT_PROXY"]
print("本地环境,使用代理")
elif not self.is_github_actions and not CONFIG["USE_PROXY"]:
print("本地环境,未启用代理")
else:
print("GitHub Actions环境,不使用代理")
def _check_version_update(self) -> None:
"""检查版本更新"""
try:
need_update, remote_version = check_version_update(
VERSION, CONFIG["VERSION_CHECK_URL"], self.proxy_url
)
if need_update and remote_version:
self.update_info = {
"current_version": VERSION,
"remote_version": remote_version,
}
print(f"发现新版本: {remote_version} (当前: {VERSION})")
else:
print("版本检查完成,当前为最新版本")
except Exception as e:
print(f"版本检查出错: {e}")
def _get_mode_strategy(self) -> Dict:
"""获取当前模式的策略配置"""
return self.MODE_STRATEGIES.get(self.report_mode, self.MODE_STRATEGIES["daily"])
def _has_notification_configured(self) -> bool:
"""检查是否配置了任何通知渠道"""
return any(
[
CONFIG["FEISHU_WEBHOOK_URL"],
CONFIG["DINGTALK_WEBHOOK_URL"],
CONFIG["WEWORK_WEBHOOK_URL"],
(CONFIG["TELEGRAM_BOT_TOKEN"] and CONFIG["TELEGRAM_CHAT_ID"]),
(
CONFIG["EMAIL_FROM"]
and CONFIG["EMAIL_PASSWORD"]
and CONFIG["EMAIL_TO"]
),
(CONFIG["NTFY_SERVER_URL"] and CONFIG["NTFY_TOPIC"]),
CONFIG["BARK_URL"],
]
)
def _has_valid_content(
self, stats: List[Dict], new_titles: Optional[Dict] = None
) -> bool:
"""检查是否有有效的新闻内容"""
if self.report_mode in ["incremental", "current"]:
# 增量模式和current模式下,只要stats有内容就说明有匹配的新闻
return any(stat["count"] > 0 for stat in stats)
else:
# 当日汇总模式下,检查是否有匹配的频率词新闻或新增新闻
has_matched_news = any(stat["count"] > 0 for stat in stats)
has_new_news = bool(
new_titles and any(len(titles) > 0 for titles in new_titles.values())
)
return has_matched_news or has_new_news
def _load_analysis_data(
self,
) -> Optional[Tuple[Dict, Dict, Dict, Dict, List, List]]:
"""统一的数据加载和预处理,使用当前监控平台列表过滤历史数据"""
try:
# 获取当前配置的监控平台ID列表
current_platform_ids = []
for platform in CONFIG["PLATFORMS"]:
current_platform_ids.append(platform["id"])
print(f"当前监控平台: {current_platform_ids}")
all_results, id_to_name, title_info = read_all_today_titles(
current_platform_ids
)
if not all_results:
print("没有找到当天的数据")
return None
total_titles = sum(len(titles) for titles in all_results.values())
print(f"读取到 {total_titles} 个标题(已按当前监控平台过滤)")
new_titles = detect_latest_new_titles(current_platform_ids)
word_groups, filter_words = load_frequency_words()
return (
all_results,
id_to_name,
title_info,
new_titles,
word_groups,
filter_words,
)
except Exception as e:
print(f"数据加载失败: {e}")
return None
def _prepare_current_title_info(self, results: Dict, time_info: str) -> Dict:
"""从当前抓取结果构建标题信息"""
title_info = {}
for source_id, titles_data in results.items():
title_info[source_id] = {}
for title, title_data in titles_data.items():
ranks = title_data.get("ranks", [])
url = title_data.get("url", "")
mobile_url = title_data.get("mobileUrl", "")
title_info[source_id][title] = {
"first_time": time_info,
"last_time": time_info,
"count": 1,
"ranks": ranks,
"url": url,
"mobileUrl": mobile_url,
}
return title_info
def _run_analysis_pipeline(
self,
data_source: Dict,
mode: str,
title_info: Dict,
new_titles: Dict,
word_groups: List[Dict],
filter_words: List[str],
id_to_name: Dict,
failed_ids: Optional[List] = None,
is_daily_summary: bool = False,
) -> Tuple[List[Dict], str]:
"""统一的分析流水线:数据处理 → 统计计算 → HTML生成"""
# 统计计算
stats, total_titles = count_word_frequency(
data_source,
word_groups,
filter_words,
id_to_name,
title_info,
self.rank_threshold,
new_titles,
mode=mode,
)
# HTML生成
html_file = generate_html_report(
stats,
total_titles,
failed_ids=failed_ids,
new_titles=new_titles,
id_to_name=id_to_name,
mode=mode,
is_daily_summary=is_daily_summary,
update_info=self.update_info if CONFIG["SHOW_VERSION_UPDATE"] else None,
)
return stats, html_file
def _send_notification_if_needed(
self,
stats: List[Dict],
report_type: str,
mode: str,
failed_ids: Optional[List] = None,
new_titles: Optional[Dict] = None,
id_to_name: Optional[Dict] = None,
html_file_path: Optional[str] = None,
) -> bool:
"""统一的通知发送逻辑,包含所有判断条件"""
has_notification = self._has_notification_configured()
if (
CONFIG["ENABLE_NOTIFICATION"]
and has_notification
and self._has_valid_content(stats, new_titles)
):
send_to_notifications(
stats,
failed_ids or [],
report_type,
new_titles,
id_to_name,
self.update_info,
self.proxy_url,
mode=mode,
html_file_path=html_file_path,
)
return True
elif CONFIG["ENABLE_NOTIFICATION"] and not has_notification:
print("⚠️ 警告:通知功能已启用但未配置任何通知渠道,将跳过通知发送")
elif not CONFIG["ENABLE_NOTIFICATION"]:
print(f"跳过{report_type}通知:通知功能已禁用")
elif (
CONFIG["ENABLE_NOTIFICATION"]
and has_notification
and not self._has_valid_content(stats, new_titles)
):
mode_strategy = self._get_mode_strategy()
if "实时" in report_type:
print(
f"跳过实时推送通知:{mode_strategy['mode_name']}下未检测到匹配的新闻"
)
else:
print(
f"跳过{mode_strategy['summary_report_type']}通知:未匹配到有效的新闻内容"
)
return False
def _generate_summary_report(self, mode_strategy: Dict) -> Optional[str]:
"""生成汇总报告(带通知)"""
summary_type = (
"当前榜单汇总" if mode_strategy["summary_mode"] == "current" else "当日汇总"
)
print(f"生成{summary_type}报告...")
# 加载分析数据
analysis_data = self._load_analysis_data()
if not analysis_data:
return None
all_results, id_to_name, title_info, new_titles, word_groups, filter_words = (
analysis_data
)
# 运行分析流水线
stats, html_file = self._run_analysis_pipeline(
all_results,
mode_strategy["summary_mode"],
title_info,
new_titles,
word_groups,
filter_words,
id_to_name,
is_daily_summary=True,
)
print(f"{summary_type}报告已生成: {html_file}")
# 发送通知
self._send_notification_if_needed(
stats,
mode_strategy["summary_report_type"],
mode_strategy["summary_mode"],
failed_ids=[],
new_titles=new_titles,
id_to_name=id_to_name,
html_file_path=html_file,
)
return html_file
def _generate_summary_html(self, mode: str = "daily") -> Optional[str]:
"""生成汇总HTML"""
summary_type = "当前榜单汇总" if mode == "current" else "当日汇总"
print(f"生成{summary_type}HTML...")
# 加载分析数据
analysis_data = self._load_analysis_data()
if not analysis_data:
return None
all_results, id_to_name, title_info, new_titles, word_groups, filter_words = (
analysis_data
)
# 运行分析流水线
_, html_file = self._run_analysis_pipeline(
all_results,
mode,
title_info,
new_titles,
word_groups,
filter_words,
id_to_name,
is_daily_summary=True,
)
print(f"{summary_type}HTML已生成: {html_file}")
return html_file
def _initialize_and_check_config(self) -> None:
"""通用初始化和配置检查"""
now = get_beijing_time()
print(f"当前北京时间: {now.strftime('%Y-%m-%d %H:%M:%S')}")
if not CONFIG["ENABLE_CRAWLER"]:
print("爬虫功能已禁用(ENABLE_CRAWLER=False),程序退出")
return
has_notification = self._has_notification_configured()
if not CONFIG["ENABLE_NOTIFICATION"]:
print("通知功能已禁用(ENABLE_NOTIFICATION=False),将只进行数据抓取")
elif not has_notification:
print("未配置任何通知渠道,将只进行数据抓取,不发送通知")
else:
print("通知功能已启用,将发送通知")
mode_strategy = self._get_mode_strategy()
print(f"报告模式: {self.report_mode}")
print(f"运行模式: {mode_strategy['description']}")
def _crawl_data(self) -> Tuple[Dict, Dict, List]:
"""执行数据爬取"""
ids = []
for platform in CONFIG["PLATFORMS"]:
if "name" in platform:
ids.append((platform["id"], platform["name"]))
else:
ids.append(platform["id"])
print(
f"配置的监控平台: {[p.get('name', p['id']) for p in CONFIG['PLATFORMS']]}"
)
print(f"开始爬取数据,请求间隔 {self.request_interval} 毫秒")
ensure_directory_exists("output")
results, id_to_name, failed_ids = self.data_fetcher.crawl_websites(
ids, self.request_interval
)
title_file = save_titles_to_file(results, id_to_name, failed_ids)
print(f"标题已保存到: {title_file}")
return results, id_to_name, failed_ids
def _execute_mode_strategy(
self, mode_strategy: Dict, results: Dict, id_to_name: Dict, failed_ids: List
) -> Optional[str]:
"""执行模式特定逻辑"""
# 获取当前监控平台ID列表
current_platform_ids = [platform["id"] for platform in CONFIG["PLATFORMS"]]
new_titles = detect_latest_new_titles(current_platform_ids)
time_info = Path(save_titles_to_file(results, id_to_name, failed_ids)).stem
word_groups, filter_words = load_frequency_words()
# current模式下,实时推送需要使用完整的历史数据来保证统计信息的完整性
if self.report_mode == "current":
# 加载完整的历史数据(已按当前平台过滤)
analysis_data = self._load_analysis_data()
if analysis_data:
(
all_results,
historical_id_to_name,
historical_title_info,
historical_new_titles,
_,
_,
) = analysis_data
print(
f"current模式:使用过滤后的历史数据,包含平台:{list(all_results.keys())}"
)
stats, html_file = self._run_analysis_pipeline(
all_results,
self.report_mode,
historical_title_info,
historical_new_titles,
word_groups,
filter_words,
historical_id_to_name,
failed_ids=failed_ids,
)
combined_id_to_name = {**historical_id_to_name, **id_to_name}
print(f"HTML报告已生成: {html_file}")
# 发送实时通知(使用完整历史数据的统计结果)
summary_html = None
if mode_strategy["should_send_realtime"]:
self._send_notification_if_needed(
stats,
mode_strategy["realtime_report_type"],
self.report_mode,
failed_ids=failed_ids,
new_titles=historical_new_titles,
id_to_name=combined_id_to_name,
html_file_path=html_file,
)
else:
print("❌ 严重错误:无法读取刚保存的数据文件")
raise RuntimeError("数据一致性检查失败:保存后立即读取失败")
else:
title_info = self._prepare_current_title_info(results, time_info)
stats, html_file = self._run_analysis_pipeline(
results,
self.report_mode,
title_info,
new_titles,
word_groups,
filter_words,
id_to_name,
failed_ids=failed_ids,
)
print(f"HTML报告已生成: {html_file}")
# 发送实时通知(如果需要)
summary_html = None
if mode_strategy["should_send_realtime"]:
self._send_notification_if_needed(
stats,
mode_strategy["realtime_report_type"],
self.report_mode,
failed_ids=failed_ids,
new_titles=new_titles,
id_to_name=id_to_name,
html_file_path=html_file,
)
# 生成汇总报告(如果需要)
summary_html = None
if mode_strategy["should_generate_summary"]:
if mode_strategy["should_send_realtime"]:
# 如果已经发送了实时通知,汇总只生成HTML不发送通知
summary_html = self._generate_summary_html(
mode_strategy["summary_mode"]
)
else:
# daily模式:直接生成汇总报告并发送通知
summary_html = self._generate_summary_report(mode_strategy)
# 打开浏览器(仅在非容器环境)
if self._should_open_browser() and html_file:
if summary_html:
summary_url = "file://" + str(Path(summary_html).resolve())
print(f"正在打开汇总报告: {summary_url}")
webbrowser.open(summary_url)
else:
file_url = "file://" + str(Path(html_file).resolve())
print(f"正在打开HTML报告: {file_url}")
webbrowser.open(file_url)
elif self.is_docker_container and html_file:
if summary_html:
print(f"汇总报告已生成(Docker环境): {summary_html}")
else:
print(f"HTML报告已生成(Docker环境): {html_file}")
return summary_html
def run(self) -> None:
"""执行分析流程"""
try:
self._initialize_and_check_config()
mode_strategy = self._get_mode_strategy()
results, id_to_name, failed_ids = self._crawl_data()
self._execute_mode_strategy(mode_strategy, results, id_to_name, failed_ids)
except Exception as e:
print(f"分析流程执行出错: {e}")
raise
def main():
try:
analyzer = NewsAnalyzer()
analyzer.run()
except FileNotFoundError as e:
print(f"❌ 配置文件错误: {e}")
print("\n请确保以下文件存在:")
print(" • config/config.yaml")
print(" • config/frequency_words.txt")
print("\n参考项目文档进行正确配置")
except Exception as e:
print(f"❌ 程序运行错误: {e}")
raise
if __name__ == "__main__":
main()
|
2302_81331056/TrendRadar
|
main.py
|
Python
|
agpl-3.0
| 181,537
|
"""
TrendRadar MCP Server
提供基于MCP协议的新闻聚合数据查询和系统管理接口。
"""
__version__ = "1.0.0"
|
2302_81331056/TrendRadar
|
mcp_server/__init__.py
|
Python
|
agpl-3.0
| 127
|
"""
TrendRadar MCP Server - FastMCP 2.0 实现
使用 FastMCP 2.0 提供生产级 MCP 工具服务器。
支持 stdio 和 HTTP 两种传输模式。
"""
import json
from typing import List, Optional, Dict
from fastmcp import FastMCP
from .tools.data_query import DataQueryTools
from .tools.analytics import AnalyticsTools
from .tools.search_tools import SearchTools
from .tools.config_mgmt import ConfigManagementTools
from .tools.system import SystemManagementTools
# 创建 FastMCP 2.0 应用
mcp = FastMCP('trendradar-news')
# 全局工具实例(在第一次请求时初始化)
_tools_instances = {}
def _get_tools(project_root: Optional[str] = None):
"""获取或创建工具实例(单例模式)"""
if not _tools_instances:
_tools_instances['data'] = DataQueryTools(project_root)
_tools_instances['analytics'] = AnalyticsTools(project_root)
_tools_instances['search'] = SearchTools(project_root)
_tools_instances['config'] = ConfigManagementTools(project_root)
_tools_instances['system'] = SystemManagementTools(project_root)
return _tools_instances
# ==================== 数据查询工具 ====================
@mcp.tool
async def get_latest_news(
platforms: Optional[List[str]] = None,
limit: int = 50,
include_url: bool = False
) -> str:
"""
获取最新一批爬取的新闻数据,快速了解当前热点
Args:
platforms: 平台ID列表,如 ['zhihu', 'weibo', 'douyin']
- 不指定时:使用 config.yaml 中配置的所有平台
- 支持的平台来自 config/config.yaml 的 platforms 配置
- 每个平台都有对应的name字段(如"知乎"、"微博"),方便AI识别
limit: 返回条数限制,默认50,最大1000
注意:实际返回数量可能少于请求值,取决于当前可用的新闻总数
include_url: 是否包含URL链接,默认False(节省token)
Returns:
JSON格式的新闻列表
**重要:数据展示建议**
本工具会返回完整的新闻列表(通常50条)给你。但请注意:
- **工具返回**:完整的50条数据 ✅
- **建议展示**:向用户展示全部数据,除非用户明确要求总结
- **用户期望**:用户可能需要完整数据,请谨慎总结
**何时可以总结**:
- 用户明确说"给我总结一下"或"挑重点说"
- 数据量超过100条时,可先展示部分并询问是否查看全部
**注意**:如果用户询问"为什么只显示了部分",说明他们需要完整数据
"""
tools = _get_tools()
result = tools['data'].get_latest_news(platforms=platforms, limit=limit, include_url=include_url)
return json.dumps(result, ensure_ascii=False, indent=2)
@mcp.tool
async def get_trending_topics(
top_n: int = 10,
mode: str = 'current'
) -> str:
"""
获取个人关注词的新闻出现频率统计(基于 config/frequency_words.txt)
注意:本工具不是自动提取新闻热点,而是统计你在 config/frequency_words.txt 中
设置的个人关注词在新闻中出现的频率。你可以自定义这个关注词列表。
Args:
top_n: 返回TOP N关注词,默认10
mode: 模式选择
- daily: 当日累计数据统计
- current: 最新一批数据统计(默认)
Returns:
JSON格式的关注词频率统计列表
"""
tools = _get_tools()
result = tools['data'].get_trending_topics(top_n=top_n, mode=mode)
return json.dumps(result, ensure_ascii=False, indent=2)
@mcp.tool
async def get_news_by_date(
date_query: Optional[str] = None,
platforms: Optional[List[str]] = None,
limit: int = 50,
include_url: bool = False
) -> str:
"""
获取指定日期的新闻数据,用于历史数据分析和对比
Args:
date_query: 日期查询,可选格式:
- 自然语言: "今天", "昨天", "前天", "3天前"
- 标准日期: "2024-01-15", "2024/01/15"
- 默认值: "今天"(节省token)
platforms: 平台ID列表,如 ['zhihu', 'weibo', 'douyin']
- 不指定时:使用 config.yaml 中配置的所有平台
- 支持的平台来自 config/config.yaml 的 platforms 配置
- 每个平台都有对应的name字段(如"知乎"、"微博"),方便AI识别
limit: 返回条数限制,默认50,最大1000
注意:实际返回数量可能少于请求值,取决于指定日期的新闻总数
include_url: 是否包含URL链接,默认False(节省token)
Returns:
JSON格式的新闻列表,包含标题、平台、排名等信息
**重要:数据展示建议**
本工具会返回完整的新闻列表(通常50条)给你。但请注意:
- **工具返回**:完整的50条数据 ✅
- **建议展示**:向用户展示全部数据,除非用户明确要求总结
- **用户期望**:用户可能需要完整数据,请谨慎总结
**何时可以总结**:
- 用户明确说"给我总结一下"或"挑重点说"
- 数据量超过100条时,可先展示部分并询问是否查看全部
**注意**:如果用户询问"为什么只显示了部分",说明他们需要完整数据
"""
tools = _get_tools()
result = tools['data'].get_news_by_date(
date_query=date_query,
platforms=platforms,
limit=limit,
include_url=include_url
)
return json.dumps(result, ensure_ascii=False, indent=2)
# ==================== 高级数据分析工具 ====================
@mcp.tool
async def analyze_topic_trend(
topic: str,
analysis_type: str = "trend",
date_range: Optional[Dict[str, str]] = None,
granularity: str = "day",
threshold: float = 3.0,
time_window: int = 24,
lookahead_hours: int = 6,
confidence_threshold: float = 0.7
) -> str:
"""
统一话题趋势分析工具 - 整合多种趋势分析模式
Args:
topic: 话题关键词(必需)
analysis_type: 分析类型,可选值:
- "trend": 热度趋势分析(追踪话题的热度变化)
- "lifecycle": 生命周期分析(从出现到消失的完整周期)
- "viral": 异常热度检测(识别突然爆火的话题)
- "predict": 话题预测(预测未来可能的热点)
date_range: 日期范围(trend和lifecycle模式),可选
- **格式**: {"start": "YYYY-MM-DD", "end": "YYYY-MM-DD"}(必须是标准日期格式)
- **说明**: AI必须根据当前日期自动计算并填入具体日期,不能使用"今天"等自然语言
- **计算示例**:
- 用户说"最近7天" → AI计算: {"start": "2025-11-11", "end": "2025-11-17"}(假设今天是11-17)
- 用户说"上周" → AI计算: {"start": "2025-11-11", "end": "2025-11-17"}(上周一到上周日)
- 用户说"本月" → AI计算: {"start": "2025-11-01", "end": "2025-11-17"}(11月1日到今天)
- **默认**: 不指定时默认分析最近7天
granularity: 时间粒度(trend模式),默认"day"(仅支持 day,因为底层数据按天聚合)
threshold: 热度突增倍数阈值(viral模式),默认3.0
time_window: 检测时间窗口小时数(viral模式),默认24
lookahead_hours: 预测未来小时数(predict模式),默认6
confidence_threshold: 置信度阈值(predict模式),默认0.7
Returns:
JSON格式的趋势分析结果
**AI使用说明:**
当用户使用相对时间表达时(如"最近7天"、"过去一周"、"上个月"),
AI必须根据当前日期(从环境 <env> 获取)计算出具体的 YYYY-MM-DD 格式日期。
**重要**:date_range 不接受"今天"、"昨天"等自然语言,必须是 YYYY-MM-DD 格式!
Examples (假设今天是 2025-11-17):
- 用户:"分析AI最近7天的趋势"
→ analyze_topic_trend(topic="人工智能", analysis_type="trend", date_range={"start": "2025-11-11", "end": "2025-11-17"})
- 用户:"看看特斯拉本月的热度"
→ analyze_topic_trend(topic="特斯拉", analysis_type="lifecycle", date_range={"start": "2025-11-01", "end": "2025-11-17"})
- analyze_topic_trend(topic="比特币", analysis_type="viral", threshold=3.0)
- analyze_topic_trend(topic="ChatGPT", analysis_type="predict", lookahead_hours=6)
"""
tools = _get_tools()
result = tools['analytics'].analyze_topic_trend_unified(
topic=topic,
analysis_type=analysis_type,
date_range=date_range,
granularity=granularity,
threshold=threshold,
time_window=time_window,
lookahead_hours=lookahead_hours,
confidence_threshold=confidence_threshold
)
return json.dumps(result, ensure_ascii=False, indent=2)
@mcp.tool
async def analyze_data_insights(
insight_type: str = "platform_compare",
topic: Optional[str] = None,
date_range: Optional[Dict[str, str]] = None,
min_frequency: int = 3,
top_n: int = 20
) -> str:
"""
统一数据洞察分析工具 - 整合多种数据分析模式
Args:
insight_type: 洞察类型,可选值:
- "platform_compare": 平台对比分析(对比不同平台对话题的关注度)
- "platform_activity": 平台活跃度统计(统计各平台发布频率和活跃时间)
- "keyword_cooccur": 关键词共现分析(分析关键词同时出现的模式)
topic: 话题关键词(可选,platform_compare模式适用)
date_range: **【对象类型】** 日期范围(可选)
- **格式**: {"start": "YYYY-MM-DD", "end": "YYYY-MM-DD"}
- **示例**: {"start": "2025-01-01", "end": "2025-01-07"}
- **重要**: 必须是对象格式,不能传递整数
min_frequency: 最小共现频次(keyword_cooccur模式),默认3
top_n: 返回TOP N结果(keyword_cooccur模式),默认20
Returns:
JSON格式的数据洞察分析结果
Examples:
- analyze_data_insights(insight_type="platform_compare", topic="人工智能")
- analyze_data_insights(insight_type="platform_activity", date_range={"start": "2025-01-01", "end": "2025-01-07"})
- analyze_data_insights(insight_type="keyword_cooccur", min_frequency=5, top_n=15)
"""
tools = _get_tools()
result = tools['analytics'].analyze_data_insights_unified(
insight_type=insight_type,
topic=topic,
date_range=date_range,
min_frequency=min_frequency,
top_n=top_n
)
return json.dumps(result, ensure_ascii=False, indent=2)
@mcp.tool
async def analyze_sentiment(
topic: Optional[str] = None,
platforms: Optional[List[str]] = None,
date_range: Optional[Dict[str, str]] = None,
limit: int = 50,
sort_by_weight: bool = True,
include_url: bool = False
) -> str:
"""
分析新闻的情感倾向和热度趋势
Args:
topic: 话题关键词(可选)
platforms: 平台ID列表,如 ['zhihu', 'weibo', 'douyin']
- 不指定时:使用 config.yaml 中配置的所有平台
- 支持的平台来自 config/config.yaml 的 platforms 配置
- 每个平台都有对应的name字段(如"知乎"、"微博"),方便AI识别
date_range: **【对象类型】** 日期范围(可选)
- **格式**: {"start": "YYYY-MM-DD", "end": "YYYY-MM-DD"}
- **示例**: {"start": "2025-01-01", "end": "2025-01-07"}
- **重要**: 必须是对象格式,不能传递整数
limit: 返回新闻数量,默认50,最大100
注意:本工具会对新闻标题进行去重(同一标题在不同平台只保留一次),
因此实际返回数量可能少于请求的 limit 值
sort_by_weight: 是否按热度权重排序,默认True
include_url: 是否包含URL链接,默认False(节省token)
Returns:
JSON格式的分析结果,包含情感分布、热度趋势和相关新闻
**重要:数据展示策略**
- 本工具返回完整的分析结果和新闻列表
- **默认展示方式**:展示完整的分析结果(包括所有新闻)
- 仅在用户明确要求"总结"或"挑重点"时才进行筛选
"""
tools = _get_tools()
result = tools['analytics'].analyze_sentiment(
topic=topic,
platforms=platforms,
date_range=date_range,
limit=limit,
sort_by_weight=sort_by_weight,
include_url=include_url
)
return json.dumps(result, ensure_ascii=False, indent=2)
@mcp.tool
async def find_similar_news(
reference_title: str,
threshold: float = 0.6,
limit: int = 50,
include_url: bool = False
) -> str:
"""
查找与指定新闻标题相似的其他新闻
Args:
reference_title: 新闻标题(完整或部分)
threshold: 相似度阈值,0-1之间,默认0.6
注意:阈值越高匹配越严格,返回结果越少
limit: 返回条数限制,默认50,最大100
注意:实际返回数量取决于相似度匹配结果,可能少于请求值
include_url: 是否包含URL链接,默认False(节省token)
Returns:
JSON格式的相似新闻列表,包含相似度分数
**重要:数据展示策略**
- 本工具返回完整的相似新闻列表
- **默认展示方式**:展示全部返回的新闻(包括相似度分数)
- 仅在用户明确要求"总结"或"挑重点"时才进行筛选
"""
tools = _get_tools()
result = tools['analytics'].find_similar_news(
reference_title=reference_title,
threshold=threshold,
limit=limit,
include_url=include_url
)
return json.dumps(result, ensure_ascii=False, indent=2)
@mcp.tool
async def generate_summary_report(
report_type: str = "daily",
date_range: Optional[Dict[str, str]] = None
) -> str:
"""
每日/每周摘要生成器 - 自动生成热点摘要报告
Args:
report_type: 报告类型(daily/weekly)
date_range: **【对象类型】** 自定义日期范围(可选)
- **格式**: {"start": "YYYY-MM-DD", "end": "YYYY-MM-DD"}
- **示例**: {"start": "2025-01-01", "end": "2025-01-07"}
- **重要**: 必须是对象格式,不能传递整数
Returns:
JSON格式的摘要报告,包含Markdown格式内容
"""
tools = _get_tools()
result = tools['analytics'].generate_summary_report(
report_type=report_type,
date_range=date_range
)
return json.dumps(result, ensure_ascii=False, indent=2)
# ==================== 智能检索工具 ====================
@mcp.tool
async def search_news(
query: str,
search_mode: str = "keyword",
date_range: Optional[Dict[str, str]] = None,
platforms: Optional[List[str]] = None,
limit: int = 50,
sort_by: str = "relevance",
threshold: float = 0.6,
include_url: bool = False
) -> str:
"""
统一搜索接口,支持多种搜索模式
Args:
query: 搜索关键词或内容片段
search_mode: 搜索模式,可选值:
- "keyword": 精确关键词匹配(默认,适合搜索特定话题)
- "fuzzy": 模糊内容匹配(适合搜索内容片段,会过滤相似度低于阈值的结果)
- "entity": 实体名称搜索(适合搜索人物/地点/机构)
date_range: 日期范围(可选)
- **格式**: {"start": "YYYY-MM-DD", "end": "YYYY-MM-DD"}
- **示例**: {"start": "2025-01-01", "end": "2025-01-07"}
- **说明**: AI需要根据用户的自然语言(如"最近7天")自动计算日期范围
- **默认**: 不指定时默认查询今天的新闻
- **注意**: start和end可以相同(表示单日查询)
platforms: 平台ID列表,如 ['zhihu', 'weibo', 'douyin']
- 不指定时:使用 config.yaml 中配置的所有平台
- 支持的平台来自 config/config.yaml 的 platforms 配置
- 每个平台都有对应的name字段(如"知乎"、"微博"),方便AI识别
limit: 返回条数限制,默认50,最大1000
注意:实际返回数量取决于搜索匹配结果(特别是 fuzzy 模式下会过滤低相似度结果)
sort_by: 排序方式,可选值:
- "relevance": 按相关度排序(默认)
- "weight": 按新闻权重排序
- "date": 按日期排序
threshold: 相似度阈值(仅fuzzy模式有效),0-1之间,默认0.6
注意:阈值越高匹配越严格,返回结果越少
include_url: 是否包含URL链接,默认False(节省token)
Returns:
JSON格式的搜索结果,包含标题、平台、排名等信息
**重要:数据展示策略**
- 本工具返回完整的搜索结果列表
- **默认展示方式**:展示全部返回的新闻,无需总结或筛选
- 仅在用户明确要求"总结"或"挑重点"时才进行筛选
**AI使用说明:**
当用户使用相对时间表达时(如"最近7天"、"过去一周"、"最近半个月"),
AI必须根据当前日期(从环境 <env> 获取)计算出具体的 YYYY-MM-DD 格式日期。
**重要**:date_range 不接受"今天"、"昨天"等自然语言,必须是 YYYY-MM-DD 格式!
**计算规则**(假设从 <env> 获取今天是 2025-11-17):
- "今天" → 不传 date_range(默认查今天)
- "最近7天" → {"start": "2025-11-11", "end": "2025-11-17"}
- "过去一周" → {"start": "2025-11-11", "end": "2025-11-17"}
- "上周" → 计算上周一到上周日,如 {"start": "2025-11-11", "end": "2025-11-17"}
- "本月" → {"start": "2025-11-01", "end": "2025-11-17"}
- "最近30天" → {"start": "2025-10-19", "end": "2025-11-17"}
Examples (假设今天是 2025-11-17):
- 用户:"今天的AI新闻" → search_news(query="人工智能")
- 用户:"最近7天的AI新闻" → search_news(query="人工智能", date_range={"start": "2025-11-11", "end": "2025-11-17"})
- 精确日期: search_news(query="人工智能", date_range={"start": "2025-01-01", "end": "2025-01-07"})
- 模糊搜索: search_news(query="特斯拉降价", search_mode="fuzzy", threshold=0.4)
"""
tools = _get_tools()
result = tools['search'].search_news_unified(
query=query,
search_mode=search_mode,
date_range=date_range,
platforms=platforms,
limit=limit,
sort_by=sort_by,
threshold=threshold,
include_url=include_url
)
return json.dumps(result, ensure_ascii=False, indent=2)
@mcp.tool
async def search_related_news_history(
reference_text: str,
time_preset: str = "yesterday",
threshold: float = 0.4,
limit: int = 50,
include_url: bool = False
) -> str:
"""
基于种子新闻,在历史数据中搜索相关新闻
Args:
reference_text: 参考新闻标题(完整或部分)
time_preset: 时间范围预设值,可选:
- "yesterday": 昨天
- "last_week": 上周 (7天)
- "last_month": 上个月 (30天)
- "custom": 自定义日期范围(需要提供 start_date 和 end_date)
threshold: 相关性阈值,0-1之间,默认0.4
注意:综合相似度计算(70%关键词重合 + 30%文本相似度)
阈值越高匹配越严格,返回结果越少
limit: 返回条数限制,默认50,最大100
注意:实际返回数量取决于相关性匹配结果,可能少于请求值
include_url: 是否包含URL链接,默认False(节省token)
Returns:
JSON格式的相关新闻列表,包含相关性分数和时间分布
**重要:数据展示策略**
- 本工具返回完整的相关新闻列表
- **默认展示方式**:展示全部返回的新闻(包括相关性分数)
- 仅在用户明确要求"总结"或"挑重点"时才进行筛选
"""
tools = _get_tools()
result = tools['search'].search_related_news_history(
reference_text=reference_text,
time_preset=time_preset,
threshold=threshold,
limit=limit,
include_url=include_url
)
return json.dumps(result, ensure_ascii=False, indent=2)
# ==================== 配置与系统管理工具 ====================
@mcp.tool
async def get_current_config(
section: str = "all"
) -> str:
"""
获取当前系统配置
Args:
section: 配置节,可选值:
- "all": 所有配置(默认)
- "crawler": 爬虫配置
- "push": 推送配置
- "keywords": 关键词配置
- "weights": 权重配置
Returns:
JSON格式的配置信息
"""
tools = _get_tools()
result = tools['config'].get_current_config(section=section)
return json.dumps(result, ensure_ascii=False, indent=2)
@mcp.tool
async def get_system_status() -> str:
"""
获取系统运行状态和健康检查信息
返回系统版本、数据统计、缓存状态等信息
Returns:
JSON格式的系统状态信息
"""
tools = _get_tools()
result = tools['system'].get_system_status()
return json.dumps(result, ensure_ascii=False, indent=2)
@mcp.tool
async def trigger_crawl(
platforms: Optional[List[str]] = None,
save_to_local: bool = False,
include_url: bool = False
) -> str:
"""
手动触发一次爬取任务(可选持久化)
Args:
platforms: 指定平台ID列表,如 ['zhihu', 'weibo', 'douyin']
- 不指定时:使用 config.yaml 中配置的所有平台
- 支持的平台来自 config/config.yaml 的 platforms 配置
- 每个平台都有对应的name字段(如"知乎"、"微博"),方便AI识别
- 注意:失败的平台会在返回结果的 failed_platforms 字段中列出
save_to_local: 是否保存到本地 output 目录,默认 False
include_url: 是否包含URL链接,默认False(节省token)
Returns:
JSON格式的任务状态信息,包含:
- platforms: 成功爬取的平台列表
- failed_platforms: 失败的平台列表(如有)
- total_news: 爬取的新闻总数
- data: 新闻数据
Examples:
- 临时爬取: trigger_crawl(platforms=['zhihu'])
- 爬取并保存: trigger_crawl(platforms=['weibo'], save_to_local=True)
- 使用默认平台: trigger_crawl() # 爬取config.yaml中配置的所有平台
"""
tools = _get_tools()
result = tools['system'].trigger_crawl(platforms=platforms, save_to_local=save_to_local, include_url=include_url)
return json.dumps(result, ensure_ascii=False, indent=2)
# ==================== 启动入口 ====================
def run_server(
project_root: Optional[str] = None,
transport: str = 'stdio',
host: str = '0.0.0.0',
port: int = 3333
):
"""
启动 MCP 服务器
Args:
project_root: 项目根目录路径
transport: 传输模式,'stdio' 或 'http'
host: HTTP模式的监听地址,默认 0.0.0.0
port: HTTP模式的监听端口,默认 3333
"""
# 初始化工具实例
_get_tools(project_root)
# 打印启动信息
print()
print("=" * 60)
print(" TrendRadar MCP Server - FastMCP 2.0")
print("=" * 60)
print(f" 传输模式: {transport.upper()}")
if transport == 'stdio':
print(" 协议: MCP over stdio (标准输入输出)")
print(" 说明: 通过标准输入输出与 MCP 客户端通信")
elif transport == 'http':
print(f" 监听地址: http://{host}:{port}")
print(f" HTTP端点: http://{host}:{port}/mcp")
print(" 协议: MCP over HTTP (生产环境)")
if project_root:
print(f" 项目目录: {project_root}")
else:
print(" 项目目录: 当前目录")
print()
print(" 已注册的工具:")
print(" === 基础数据查询(P0核心)===")
print(" 1. get_latest_news - 获取最新新闻")
print(" 2. get_news_by_date - 按日期查询新闻(支持自然语言)")
print(" 3. get_trending_topics - 获取趋势话题")
print()
print(" === 智能检索工具 ===")
print(" 4. search_news - 统一新闻搜索(关键词/模糊/实体)")
print(" 5. search_related_news_history - 历史相关新闻检索")
print()
print(" === 高级数据分析 ===")
print(" 6. analyze_topic_trend - 统一话题趋势分析(热度/生命周期/爆火/预测)")
print(" 7. analyze_data_insights - 统一数据洞察分析(平台对比/活跃度/关键词共现)")
print(" 8. analyze_sentiment - 情感倾向分析")
print(" 9. find_similar_news - 相似新闻查找")
print(" 10. generate_summary_report - 每日/每周摘要生成")
print()
print(" === 配置与系统管理 ===")
print(" 11. get_current_config - 获取当前系统配置")
print(" 12. get_system_status - 获取系统运行状态")
print(" 13. trigger_crawl - 手动触发爬取任务")
print("=" * 60)
print()
# 根据传输模式运行服务器
if transport == 'stdio':
mcp.run(transport='stdio')
elif transport == 'http':
# HTTP 模式(生产推荐)
mcp.run(
transport='http',
host=host,
port=port,
path='/mcp' # HTTP 端点路径
)
else:
raise ValueError(f"不支持的传输模式: {transport}")
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='TrendRadar MCP Server - 新闻热点聚合 MCP 工具服务器',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
详细配置教程请查看: README-Cherry-Studio.md
"""
)
parser.add_argument(
'--transport',
choices=['stdio', 'http'],
default='stdio',
help='传输模式:stdio (默认) 或 http (生产环境)'
)
parser.add_argument(
'--host',
default='0.0.0.0',
help='HTTP模式的监听地址,默认 0.0.0.0'
)
parser.add_argument(
'--port',
type=int,
default=3333,
help='HTTP模式的监听端口,默认 3333'
)
parser.add_argument(
'--project-root',
help='项目根目录路径'
)
args = parser.parse_args()
run_server(
project_root=args.project_root,
transport=args.transport,
host=args.host,
port=args.port
)
|
2302_81331056/TrendRadar
|
mcp_server/server.py
|
Python
|
agpl-3.0
| 27,722
|
"""
服务层模块
提供数据访问、缓存、解析等核心服务。
"""
|
2302_81331056/TrendRadar
|
mcp_server/services/__init__.py
|
Python
|
agpl-3.0
| 80
|
"""
缓存服务
实现TTL缓存机制,提升数据访问性能。
"""
import time
from typing import Any, Optional
from threading import Lock
class CacheService:
"""缓存服务类"""
def __init__(self):
"""初始化缓存服务"""
self._cache = {}
self._timestamps = {}
self._lock = Lock()
def get(self, key: str, ttl: int = 900) -> Optional[Any]:
"""
获取缓存数据
Args:
key: 缓存键
ttl: 存活时间(秒),默认15分钟
Returns:
缓存的值,如果不存在或已过期则返回None
"""
with self._lock:
if key in self._cache:
# 检查是否过期
if time.time() - self._timestamps[key] < ttl:
return self._cache[key]
else:
# 已过期,删除缓存
del self._cache[key]
del self._timestamps[key]
return None
def set(self, key: str, value: Any) -> None:
"""
设置缓存数据
Args:
key: 缓存键
value: 缓存值
"""
with self._lock:
self._cache[key] = value
self._timestamps[key] = time.time()
def delete(self, key: str) -> bool:
"""
删除缓存
Args:
key: 缓存键
Returns:
是否成功删除
"""
with self._lock:
if key in self._cache:
del self._cache[key]
del self._timestamps[key]
return True
return False
def clear(self) -> None:
"""清空所有缓存"""
with self._lock:
self._cache.clear()
self._timestamps.clear()
def cleanup_expired(self, ttl: int = 900) -> int:
"""
清理过期缓存
Args:
ttl: 存活时间(秒)
Returns:
清理的条目数量
"""
with self._lock:
current_time = time.time()
expired_keys = [
key for key, timestamp in self._timestamps.items()
if current_time - timestamp >= ttl
]
for key in expired_keys:
del self._cache[key]
del self._timestamps[key]
return len(expired_keys)
def get_stats(self) -> dict:
"""
获取缓存统计信息
Returns:
统计信息字典
"""
with self._lock:
return {
"total_entries": len(self._cache),
"oldest_entry_age": (
time.time() - min(self._timestamps.values())
if self._timestamps else 0
),
"newest_entry_age": (
time.time() - max(self._timestamps.values())
if self._timestamps else 0
)
}
# 全局缓存实例
_global_cache = None
def get_cache() -> CacheService:
"""
获取全局缓存实例
Returns:
全局缓存服务实例
"""
global _global_cache
if _global_cache is None:
_global_cache = CacheService()
return _global_cache
|
2302_81331056/TrendRadar
|
mcp_server/services/cache_service.py
|
Python
|
agpl-3.0
| 3,290
|
"""
数据访问服务
提供统一的数据查询接口,封装数据访问逻辑。
"""
import re
from collections import Counter
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Tuple
from .cache_service import get_cache
from .parser_service import ParserService
from ..utils.errors import DataNotFoundError
class DataService:
"""数据访问服务类"""
def __init__(self, project_root: str = None):
"""
初始化数据服务
Args:
project_root: 项目根目录
"""
self.parser = ParserService(project_root)
self.cache = get_cache()
def get_latest_news(
self,
platforms: Optional[List[str]] = None,
limit: int = 50,
include_url: bool = False
) -> List[Dict]:
"""
获取最新一批爬取的新闻数据
Args:
platforms: 平台ID列表,None表示所有平台
limit: 返回条数限制
include_url: 是否包含URL链接,默认False(节省token)
Returns:
新闻列表
Raises:
DataNotFoundError: 数据不存在
"""
# 尝试从缓存获取
cache_key = f"latest_news:{','.join(platforms or [])}:{limit}:{include_url}"
cached = self.cache.get(cache_key, ttl=900) # 15分钟缓存
if cached:
return cached
# 读取今天的数据
all_titles, id_to_name, timestamps = self.parser.read_all_titles_for_date(
date=None,
platform_ids=platforms
)
# 获取最新的文件时间
if timestamps:
latest_timestamp = max(timestamps.values())
fetch_time = datetime.fromtimestamp(latest_timestamp)
else:
fetch_time = datetime.now()
# 转换为新闻列表
news_list = []
for platform_id, titles in all_titles.items():
platform_name = id_to_name.get(platform_id, platform_id)
for title, info in titles.items():
# 取第一个排名
rank = info["ranks"][0] if info["ranks"] else 0
news_item = {
"title": title,
"platform": platform_id,
"platform_name": platform_name,
"rank": rank,
"timestamp": fetch_time.strftime("%Y-%m-%d %H:%M:%S")
}
# 条件性添加 URL 字段
if include_url:
news_item["url"] = info.get("url", "")
news_item["mobileUrl"] = info.get("mobileUrl", "")
news_list.append(news_item)
# 按排名排序
news_list.sort(key=lambda x: x["rank"])
# 限制返回数量
result = news_list[:limit]
# 缓存结果
self.cache.set(cache_key, result)
return result
def get_news_by_date(
self,
target_date: datetime,
platforms: Optional[List[str]] = None,
limit: int = 50,
include_url: bool = False
) -> List[Dict]:
"""
按指定日期获取新闻
Args:
target_date: 目标日期
platforms: 平台ID列表,None表示所有平台
limit: 返回条数限制
include_url: 是否包含URL链接,默认False(节省token)
Returns:
新闻列表
Raises:
DataNotFoundError: 数据不存在
Examples:
>>> service = DataService()
>>> news = service.get_news_by_date(
... target_date=datetime(2025, 10, 10),
... platforms=['zhihu'],
... limit=20
... )
"""
# 尝试从缓存获取
date_str = target_date.strftime("%Y-%m-%d")
cache_key = f"news_by_date:{date_str}:{','.join(platforms or [])}:{limit}:{include_url}"
cached = self.cache.get(cache_key, ttl=1800) # 30分钟缓存
if cached:
return cached
# 读取指定日期的数据
all_titles, id_to_name, timestamps = self.parser.read_all_titles_for_date(
date=target_date,
platform_ids=platforms
)
# 转换为新闻列表
news_list = []
for platform_id, titles in all_titles.items():
platform_name = id_to_name.get(platform_id, platform_id)
for title, info in titles.items():
# 计算平均排名
avg_rank = sum(info["ranks"]) / len(info["ranks"]) if info["ranks"] else 0
news_item = {
"title": title,
"platform": platform_id,
"platform_name": platform_name,
"rank": info["ranks"][0] if info["ranks"] else 0,
"avg_rank": round(avg_rank, 2),
"count": len(info["ranks"]),
"date": date_str
}
# 条件性添加 URL 字段
if include_url:
news_item["url"] = info.get("url", "")
news_item["mobileUrl"] = info.get("mobileUrl", "")
news_list.append(news_item)
# 按排名排序
news_list.sort(key=lambda x: x["rank"])
# 限制返回数量
result = news_list[:limit]
# 缓存结果(历史数据缓存更久)
self.cache.set(cache_key, result)
return result
def search_news_by_keyword(
self,
keyword: str,
date_range: Optional[Tuple[datetime, datetime]] = None,
platforms: Optional[List[str]] = None,
limit: Optional[int] = None
) -> Dict:
"""
按关键词搜索新闻
Args:
keyword: 搜索关键词
date_range: 日期范围 (start_date, end_date)
platforms: 平台过滤列表
limit: 返回条数限制(可选)
Returns:
搜索结果字典
Raises:
DataNotFoundError: 数据不存在
"""
# 确定搜索日期范围
if date_range:
start_date, end_date = date_range
else:
# 默认搜索今天
start_date = end_date = datetime.now()
# 收集所有匹配的新闻
results = []
platform_distribution = Counter()
# 遍历日期范围
current_date = start_date
while current_date <= end_date:
try:
all_titles, id_to_name, _ = self.parser.read_all_titles_for_date(
date=current_date,
platform_ids=platforms
)
# 搜索包含关键词的标题
for platform_id, titles in all_titles.items():
platform_name = id_to_name.get(platform_id, platform_id)
for title, info in titles.items():
if keyword.lower() in title.lower():
# 计算平均排名
avg_rank = sum(info["ranks"]) / len(info["ranks"]) if info["ranks"] else 0
results.append({
"title": title,
"platform": platform_id,
"platform_name": platform_name,
"ranks": info["ranks"],
"count": len(info["ranks"]),
"avg_rank": round(avg_rank, 2),
"url": info.get("url", ""),
"mobileUrl": info.get("mobileUrl", ""),
"date": current_date.strftime("%Y-%m-%d")
})
platform_distribution[platform_id] += 1
except DataNotFoundError:
# 该日期没有数据,继续下一天
pass
# 下一天
current_date += timedelta(days=1)
if not results:
raise DataNotFoundError(
f"未找到包含关键词 '{keyword}' 的新闻",
suggestion="请尝试其他关键词或扩大日期范围"
)
# 计算统计信息
total_ranks = []
for item in results:
total_ranks.extend(item["ranks"])
avg_rank = sum(total_ranks) / len(total_ranks) if total_ranks else 0
# 限制返回数量(如果指定)
total_found = len(results)
if limit is not None and limit > 0:
results = results[:limit]
return {
"results": results,
"total": len(results),
"total_found": total_found,
"statistics": {
"platform_distribution": dict(platform_distribution),
"avg_rank": round(avg_rank, 2),
"keyword": keyword
}
}
def get_trending_topics(
self,
top_n: int = 10,
mode: str = "current"
) -> Dict:
"""
获取个人关注词的新闻出现频率统计
注意:本工具基于 config/frequency_words.txt 中的个人关注词列表进行统计,
而不是自动从新闻中提取热点话题。用户可以自定义这个关注词列表。
Args:
top_n: 返回TOP N关注词
mode: 模式 - daily(当日累计), current(最新一批)
Returns:
关注词频率统计字典
Raises:
DataNotFoundError: 数据不存在
"""
# 尝试从缓存获取
cache_key = f"trending_topics:{top_n}:{mode}"
cached = self.cache.get(cache_key, ttl=1800) # 30分钟缓存
if cached:
return cached
# 读取今天的数据
all_titles, id_to_name, timestamps = self.parser.read_all_titles_for_date()
if not all_titles:
raise DataNotFoundError(
"未找到今天的新闻数据",
suggestion="请确保爬虫已经运行并生成了数据"
)
# 加载关键词配置
word_groups = self.parser.parse_frequency_words()
# 根据mode选择要处理的标题数据
titles_to_process = {}
if mode == "daily":
# daily模式:处理当天所有累计数据
titles_to_process = all_titles
elif mode == "current":
# current模式:只处理最新一批数据(最新时间戳的文件)
if timestamps:
# 找出最新的时间戳
latest_timestamp = max(timestamps.values())
# 重新读取,只获取最新时间的数据
# 这里我们通过timestamps字典反查找最新文件对应的平台
latest_titles, _, _ = self.parser.read_all_titles_for_date()
# 由于read_all_titles_for_date返回所有文件的合并数据,
# 我们需要通过timestamps来过滤出最新批次
# 简化实现:使用当前所有数据作为最新批次
# (更精确的实现需要解析服务支持按时间过滤)
titles_to_process = latest_titles
else:
titles_to_process = all_titles
else:
raise ValueError(
f"不支持的模式: {mode}。支持的模式: daily, current"
)
# 统计词频
word_frequency = Counter()
keyword_to_news = {}
# 遍历要处理的标题
for platform_id, titles in titles_to_process.items():
for title in titles.keys():
# 对每个关键词组进行匹配
for group in word_groups:
all_words = group.get("required", []) + group.get("normal", [])
for word in all_words:
if word and word in title:
word_frequency[word] += 1
if word not in keyword_to_news:
keyword_to_news[word] = []
keyword_to_news[word].append(title)
# 获取TOP N关键词
top_keywords = word_frequency.most_common(top_n)
# 构建话题列表
topics = []
for keyword, frequency in top_keywords:
matched_news = keyword_to_news.get(keyword, [])
topics.append({
"keyword": keyword,
"frequency": frequency,
"matched_news": len(set(matched_news)), # 去重后的新闻数量
"trend": "stable", # TODO: 需要历史数据来计算趋势
"weight_score": 0.0 # TODO: 需要实现权重计算
})
# 构建结果
result = {
"topics": topics,
"generated_at": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"mode": mode,
"total_keywords": len(word_frequency),
"description": self._get_mode_description(mode)
}
# 缓存结果
self.cache.set(cache_key, result)
return result
def _get_mode_description(self, mode: str) -> str:
"""获取模式描述"""
descriptions = {
"daily": "当日累计统计",
"current": "最新一批统计"
}
return descriptions.get(mode, "未知模式")
def get_current_config(self, section: str = "all") -> Dict:
"""
获取当前系统配置
Args:
section: 配置节 - all/crawler/push/keywords/weights
Returns:
配置字典
Raises:
FileParseError: 配置文件解析错误
"""
# 尝试从缓存获取
cache_key = f"config:{section}"
cached = self.cache.get(cache_key, ttl=3600) # 1小时缓存
if cached:
return cached
# 解析配置文件
config_data = self.parser.parse_yaml_config()
word_groups = self.parser.parse_frequency_words()
# 根据section返回对应配置
if section == "all" or section == "crawler":
crawler_config = {
"enable_crawler": config_data.get("crawler", {}).get("enable_crawler", True),
"use_proxy": config_data.get("crawler", {}).get("use_proxy", False),
"request_interval": config_data.get("crawler", {}).get("request_interval", 1),
"retry_times": 3,
"platforms": [p["id"] for p in config_data.get("platforms", [])]
}
if section == "all" or section == "push":
push_config = {
"enable_notification": config_data.get("notification", {}).get("enable_notification", True),
"enabled_channels": [],
"message_batch_size": config_data.get("notification", {}).get("message_batch_size", 20),
"push_window": config_data.get("notification", {}).get("push_window", {})
}
# 检测已配置的通知渠道
webhooks = config_data.get("notification", {}).get("webhooks", {})
if webhooks.get("feishu_url"):
push_config["enabled_channels"].append("feishu")
if webhooks.get("dingtalk_url"):
push_config["enabled_channels"].append("dingtalk")
if webhooks.get("wework_url"):
push_config["enabled_channels"].append("wework")
if section == "all" or section == "keywords":
keywords_config = {
"word_groups": word_groups,
"total_groups": len(word_groups)
}
if section == "all" or section == "weights":
weights_config = {
"rank_weight": config_data.get("weight", {}).get("rank_weight", 0.6),
"frequency_weight": config_data.get("weight", {}).get("frequency_weight", 0.3),
"hotness_weight": config_data.get("weight", {}).get("hotness_weight", 0.1)
}
# 组装结果
if section == "all":
result = {
"crawler": crawler_config,
"push": push_config,
"keywords": keywords_config,
"weights": weights_config
}
elif section == "crawler":
result = crawler_config
elif section == "push":
result = push_config
elif section == "keywords":
result = keywords_config
elif section == "weights":
result = weights_config
else:
result = {}
# 缓存结果
self.cache.set(cache_key, result)
return result
def get_available_date_range(self) -> Tuple[Optional[datetime], Optional[datetime]]:
"""
扫描 output 目录,返回实际可用的日期范围
Returns:
(最早日期, 最新日期) 元组,如果没有数据则返回 (None, None)
Examples:
>>> service = DataService()
>>> earliest, latest = service.get_available_date_range()
>>> print(f"可用日期范围:{earliest} 至 {latest}")
"""
output_dir = self.parser.project_root / "output"
if not output_dir.exists():
return (None, None)
available_dates = []
# 遍历日期文件夹
for date_folder in output_dir.iterdir():
if date_folder.is_dir() and not date_folder.name.startswith('.'):
# 解析日期(格式: YYYY年MM月DD日)
try:
date_match = re.match(r'(\d{4})年(\d{2})月(\d{2})日', date_folder.name)
if date_match:
folder_date = datetime(
int(date_match.group(1)),
int(date_match.group(2)),
int(date_match.group(3))
)
available_dates.append(folder_date)
except Exception:
pass
if not available_dates:
return (None, None)
return (min(available_dates), max(available_dates))
def get_system_status(self) -> Dict:
"""
获取系统运行状态
Returns:
系统状态字典
"""
# 获取数据统计
output_dir = self.parser.project_root / "output"
total_storage = 0
oldest_record = None
latest_record = None
total_news = 0
if output_dir.exists():
# 遍历日期文件夹
for date_folder in output_dir.iterdir():
if date_folder.is_dir():
# 解析日期
try:
date_str = date_folder.name
# 格式: YYYY年MM月DD日
date_match = re.match(r'(\d{4})年(\d{2})月(\d{2})日', date_str)
if date_match:
folder_date = datetime(
int(date_match.group(1)),
int(date_match.group(2)),
int(date_match.group(3))
)
if oldest_record is None or folder_date < oldest_record:
oldest_record = folder_date
if latest_record is None or folder_date > latest_record:
latest_record = folder_date
except:
pass
# 计算存储大小
for item in date_folder.rglob("*"):
if item.is_file():
total_storage += item.stat().st_size
# 读取版本信息
version_file = self.parser.project_root / "version"
version = "unknown"
if version_file.exists():
try:
with open(version_file, "r") as f:
version = f.read().strip()
except:
pass
return {
"system": {
"version": version,
"project_root": str(self.parser.project_root)
},
"data": {
"total_storage": f"{total_storage / 1024 / 1024:.2f} MB",
"oldest_record": oldest_record.strftime("%Y-%m-%d") if oldest_record else None,
"latest_record": latest_record.strftime("%Y-%m-%d") if latest_record else None,
},
"cache": self.cache.get_stats(),
"health": "healthy"
}
|
2302_81331056/TrendRadar
|
mcp_server/services/data_service.py
|
Python
|
agpl-3.0
| 20,738
|
"""
文件解析服务
提供txt格式新闻数据和YAML配置文件的解析功能。
"""
import re
from pathlib import Path
from typing import Dict, List, Tuple, Optional
from datetime import datetime
import yaml
from ..utils.errors import FileParseError, DataNotFoundError
from .cache_service import get_cache
class ParserService:
"""文件解析服务类"""
def __init__(self, project_root: str = None):
"""
初始化解析服务
Args:
project_root: 项目根目录,默认为当前目录的父目录
"""
if project_root is None:
# 获取当前文件所在目录的父目录的父目录
current_file = Path(__file__)
self.project_root = current_file.parent.parent.parent
else:
self.project_root = Path(project_root)
# 初始化缓存服务
self.cache = get_cache()
@staticmethod
def clean_title(title: str) -> str:
"""
清理标题文本
Args:
title: 原始标题
Returns:
清理后的标题
"""
# 移除多余空白
title = re.sub(r'\s+', ' ', title)
# 移除特殊字符
title = title.strip()
return title
def parse_txt_file(self, file_path: Path) -> Tuple[Dict, Dict]:
"""
解析单个txt文件的标题数据
Args:
file_path: txt文件路径
Returns:
(titles_by_id, id_to_name) 元组
- titles_by_id: {platform_id: {title: {ranks, url, mobileUrl}}}
- id_to_name: {platform_id: platform_name}
Raises:
FileParseError: 文件解析错误
"""
if not file_path.exists():
raise FileParseError(str(file_path), "文件不存在")
titles_by_id = {}
id_to_name = {}
try:
with open(file_path, "r", encoding="utf-8") as f:
content = f.read()
sections = content.split("\n\n")
for section in sections:
if not section.strip() or "==== 以下ID请求失败 ====" in section:
continue
lines = section.strip().split("\n")
if len(lines) < 2:
continue
# 解析header: id | name 或 id
header_line = lines[0].strip()
if " | " in header_line:
parts = header_line.split(" | ", 1)
source_id = parts[0].strip()
name = parts[1].strip()
id_to_name[source_id] = name
else:
source_id = header_line
id_to_name[source_id] = source_id
titles_by_id[source_id] = {}
# 解析标题行
for line in lines[1:]:
if line.strip():
try:
title_part = line.strip()
rank = None
# 提取排名
if ". " in title_part and title_part.split(". ")[0].isdigit():
rank_str, title_part = title_part.split(". ", 1)
rank = int(rank_str)
# 提取 MOBILE URL
mobile_url = ""
if " [MOBILE:" in title_part:
title_part, mobile_part = title_part.rsplit(" [MOBILE:", 1)
if mobile_part.endswith("]"):
mobile_url = mobile_part[:-1]
# 提取 URL
url = ""
if " [URL:" in title_part:
title_part, url_part = title_part.rsplit(" [URL:", 1)
if url_part.endswith("]"):
url = url_part[:-1]
title = self.clean_title(title_part.strip())
ranks = [rank] if rank is not None else [1]
titles_by_id[source_id][title] = {
"ranks": ranks,
"url": url,
"mobileUrl": mobile_url,
}
except Exception as e:
# 忽略单行解析错误
continue
except Exception as e:
raise FileParseError(str(file_path), str(e))
return titles_by_id, id_to_name
def get_date_folder_name(self, date: datetime = None) -> str:
"""
获取日期文件夹名称
Args:
date: 日期对象,默认为今天
Returns:
文件夹名称,格式: YYYY年MM月DD日
"""
if date is None:
date = datetime.now()
return date.strftime("%Y年%m月%d日")
def read_all_titles_for_date(
self,
date: datetime = None,
platform_ids: Optional[List[str]] = None
) -> Tuple[Dict, Dict, Dict]:
"""
读取指定日期的所有标题文件(带缓存)
Args:
date: 日期对象,默认为今天
platform_ids: 平台ID列表,None表示所有平台
Returns:
(all_titles, id_to_name, all_timestamps) 元组
- all_titles: {platform_id: {title: {ranks, url, mobileUrl, ...}}}
- id_to_name: {platform_id: platform_name}
- all_timestamps: {filename: timestamp}
Raises:
DataNotFoundError: 数据不存在
"""
# 生成缓存键
date_str = self.get_date_folder_name(date)
platform_key = ','.join(sorted(platform_ids)) if platform_ids else 'all'
cache_key = f"read_all_titles:{date_str}:{platform_key}"
# 尝试从缓存获取
# 对于历史数据(非今天),使用更长的缓存时间(1小时)
# 对于今天的数据,使用较短的缓存时间(15分钟),因为可能有新数据
is_today = (date is None) or (date.date() == datetime.now().date())
ttl = 900 if is_today else 3600 # 15分钟 vs 1小时
cached = self.cache.get(cache_key, ttl=ttl)
if cached:
return cached
# 缓存未命中,读取文件
date_folder = self.get_date_folder_name(date)
txt_dir = self.project_root / "output" / date_folder / "txt"
if not txt_dir.exists():
raise DataNotFoundError(
f"未找到 {date_folder} 的数据目录",
suggestion="请先运行爬虫或检查日期是否正确"
)
all_titles = {}
id_to_name = {}
all_timestamps = {}
# 读取所有txt文件
txt_files = sorted(txt_dir.glob("*.txt"))
if not txt_files:
raise DataNotFoundError(
f"{date_folder} 没有数据文件",
suggestion="请等待爬虫任务完成"
)
for txt_file in txt_files:
try:
titles_by_id, file_id_to_name = self.parse_txt_file(txt_file)
# 更新id_to_name
id_to_name.update(file_id_to_name)
# 合并标题数据
for platform_id, titles in titles_by_id.items():
# 如果指定了平台过滤
if platform_ids and platform_id not in platform_ids:
continue
if platform_id not in all_titles:
all_titles[platform_id] = {}
for title, info in titles.items():
if title in all_titles[platform_id]:
# 合并排名
all_titles[platform_id][title]["ranks"].extend(info["ranks"])
else:
all_titles[platform_id][title] = info.copy()
# 记录文件时间戳
all_timestamps[txt_file.name] = txt_file.stat().st_mtime
except Exception as e:
# 忽略单个文件的解析错误,继续处理其他文件
print(f"Warning: 解析文件 {txt_file} 失败: {e}")
continue
if not all_titles:
raise DataNotFoundError(
f"{date_folder} 没有有效的数据",
suggestion="请检查数据文件格式或重新运行爬虫"
)
# 缓存结果
result = (all_titles, id_to_name, all_timestamps)
self.cache.set(cache_key, result)
return result
def parse_yaml_config(self, config_path: str = None) -> dict:
"""
解析YAML配置文件
Args:
config_path: 配置文件路径,默认为 config/config.yaml
Returns:
配置字典
Raises:
FileParseError: 配置文件解析错误
"""
if config_path is None:
config_path = self.project_root / "config" / "config.yaml"
else:
config_path = Path(config_path)
if not config_path.exists():
raise FileParseError(str(config_path), "配置文件不存在")
try:
with open(config_path, "r", encoding="utf-8") as f:
config_data = yaml.safe_load(f)
return config_data
except Exception as e:
raise FileParseError(str(config_path), str(e))
def parse_frequency_words(self, words_file: str = None) -> List[Dict]:
"""
解析关键词配置文件
Args:
words_file: 关键词文件路径,默认为 config/frequency_words.txt
Returns:
词组列表
Raises:
FileParseError: 文件解析错误
"""
if words_file is None:
words_file = self.project_root / "config" / "frequency_words.txt"
else:
words_file = Path(words_file)
if not words_file.exists():
return []
word_groups = []
try:
with open(words_file, "r", encoding="utf-8") as f:
for line in f:
line = line.strip()
if not line or line.startswith("#"):
continue
# 使用 | 分隔符
parts = [p.strip() for p in line.split("|")]
if not parts:
continue
group = {
"required": [],
"normal": [],
"filter_words": []
}
for part in parts:
if not part:
continue
words = [w.strip() for w in part.split(",")]
for word in words:
if not word:
continue
if word.endswith("+"):
# 必须词
group["required"].append(word[:-1])
elif word.endswith("!"):
# 过滤词
group["filter_words"].append(word[:-1])
else:
# 普通词
group["normal"].append(word)
if group["required"] or group["normal"]:
word_groups.append(group)
except Exception as e:
raise FileParseError(str(words_file), str(e))
return word_groups
|
2302_81331056/TrendRadar
|
mcp_server/services/parser_service.py
|
Python
|
agpl-3.0
| 12,027
|
"""
MCP 工具模块
包含所有MCP工具的实现。
"""
|
2302_81331056/TrendRadar
|
mcp_server/tools/__init__.py
|
Python
|
agpl-3.0
| 60
|
"""
高级数据分析工具
提供热度趋势分析、平台对比、关键词共现、情感分析等高级分析功能。
"""
import re
from collections import Counter, defaultdict
from datetime import datetime, timedelta
from typing import Dict, List, Optional
from difflib import SequenceMatcher
from ..services.data_service import DataService
from ..utils.validators import (
validate_platforms,
validate_limit,
validate_keyword,
validate_top_n,
validate_date_range
)
from ..utils.errors import MCPError, InvalidParameterError, DataNotFoundError
def calculate_news_weight(news_data: Dict, rank_threshold: int = 5) -> float:
"""
计算新闻权重(用于排序)
基于 main.py 的权重算法实现,综合考虑:
- 排名权重 (60%):新闻在榜单中的排名
- 频次权重 (30%):新闻出现的次数
- 热度权重 (10%):高排名出现的比例
Args:
news_data: 新闻数据字典,包含 ranks 和 count 字段
rank_threshold: 高排名阈值,默认5
Returns:
权重分数(0-100之间的浮点数)
"""
ranks = news_data.get("ranks", [])
if not ranks:
return 0.0
count = news_data.get("count", len(ranks))
# 权重配置(与 config.yaml 保持一致)
RANK_WEIGHT = 0.6
FREQUENCY_WEIGHT = 0.3
HOTNESS_WEIGHT = 0.1
# 1. 排名权重:Σ(11 - min(rank, 10)) / 出现次数
rank_scores = []
for rank in ranks:
score = 11 - min(rank, 10)
rank_scores.append(score)
rank_weight = sum(rank_scores) / len(ranks) if ranks else 0
# 2. 频次权重:min(出现次数, 10) × 10
frequency_weight = min(count, 10) * 10
# 3. 热度加成:高排名次数 / 总出现次数 × 100
high_rank_count = sum(1 for rank in ranks if rank <= rank_threshold)
hotness_ratio = high_rank_count / len(ranks) if ranks else 0
hotness_weight = hotness_ratio * 100
# 综合权重
total_weight = (
rank_weight * RANK_WEIGHT
+ frequency_weight * FREQUENCY_WEIGHT
+ hotness_weight * HOTNESS_WEIGHT
)
return total_weight
class AnalyticsTools:
"""高级数据分析工具类"""
def __init__(self, project_root: str = None):
"""
初始化分析工具
Args:
project_root: 项目根目录
"""
self.data_service = DataService(project_root)
def analyze_data_insights_unified(
self,
insight_type: str = "platform_compare",
topic: Optional[str] = None,
date_range: Optional[Dict[str, str]] = None,
min_frequency: int = 3,
top_n: int = 20
) -> Dict:
"""
统一数据洞察分析工具 - 整合多种数据分析模式
Args:
insight_type: 洞察类型,可选值:
- "platform_compare": 平台对比分析(对比不同平台对话题的关注度)
- "platform_activity": 平台活跃度统计(统计各平台发布频率和活跃时间)
- "keyword_cooccur": 关键词共现分析(分析关键词同时出现的模式)
topic: 话题关键词(可选,platform_compare模式适用)
date_range: 日期范围,格式: {"start": "YYYY-MM-DD", "end": "YYYY-MM-DD"}
min_frequency: 最小共现频次(keyword_cooccur模式),默认3
top_n: 返回TOP N结果(keyword_cooccur模式),默认20
Returns:
数据洞察分析结果字典
Examples:
- analyze_data_insights_unified(insight_type="platform_compare", topic="人工智能")
- analyze_data_insights_unified(insight_type="platform_activity", date_range={...})
- analyze_data_insights_unified(insight_type="keyword_cooccur", min_frequency=5)
"""
try:
# 参数验证
if insight_type not in ["platform_compare", "platform_activity", "keyword_cooccur"]:
raise InvalidParameterError(
f"无效的洞察类型: {insight_type}",
suggestion="支持的类型: platform_compare, platform_activity, keyword_cooccur"
)
# 根据洞察类型调用相应方法
if insight_type == "platform_compare":
return self.compare_platforms(
topic=topic,
date_range=date_range
)
elif insight_type == "platform_activity":
return self.get_platform_activity_stats(
date_range=date_range
)
else: # keyword_cooccur
return self.analyze_keyword_cooccurrence(
min_frequency=min_frequency,
top_n=top_n
)
except MCPError as e:
return {
"success": False,
"error": e.to_dict()
}
except Exception as e:
return {
"success": False,
"error": {
"code": "INTERNAL_ERROR",
"message": str(e)
}
}
def analyze_topic_trend_unified(
self,
topic: str,
analysis_type: str = "trend",
date_range: Optional[Dict[str, str]] = None,
granularity: str = "day",
threshold: float = 3.0,
time_window: int = 24,
lookahead_hours: int = 6,
confidence_threshold: float = 0.7
) -> Dict:
"""
统一话题趋势分析工具 - 整合多种趋势分析模式
Args:
topic: 话题关键词(必需)
analysis_type: 分析类型,可选值:
- "trend": 热度趋势分析(追踪话题的热度变化)
- "lifecycle": 生命周期分析(从出现到消失的完整周期)
- "viral": 异常热度检测(识别突然爆火的话题)
- "predict": 话题预测(预测未来可能的热点)
date_range: 日期范围(trend和lifecycle模式),可选
- **格式**: {"start": "YYYY-MM-DD", "end": "YYYY-MM-DD"}
- **默认**: 不指定时默认分析最近7天
granularity: 时间粒度(trend模式),默认"day"(hour/day)
threshold: 热度突增倍数阈值(viral模式),默认3.0
time_window: 检测时间窗口小时数(viral模式),默认24
lookahead_hours: 预测未来小时数(predict模式),默认6
confidence_threshold: 置信度阈值(predict模式),默认0.7
Returns:
趋势分析结果字典
Examples (假设今天是 2025-11-17):
- 用户:"分析AI最近7天的趋势" → analyze_topic_trend_unified(topic="人工智能", analysis_type="trend", date_range={"start": "2025-11-11", "end": "2025-11-17"})
- 用户:"看看特斯拉本月的热度" → analyze_topic_trend_unified(topic="特斯拉", analysis_type="lifecycle", date_range={"start": "2025-11-01", "end": "2025-11-17"})
- analyze_topic_trend_unified(topic="比特币", analysis_type="viral", threshold=3.0)
- analyze_topic_trend_unified(topic="ChatGPT", analysis_type="predict", lookahead_hours=6)
"""
try:
# 参数验证
topic = validate_keyword(topic)
if analysis_type not in ["trend", "lifecycle", "viral", "predict"]:
raise InvalidParameterError(
f"无效的分析类型: {analysis_type}",
suggestion="支持的类型: trend, lifecycle, viral, predict"
)
# 根据分析类型调用相应方法
if analysis_type == "trend":
return self.get_topic_trend_analysis(
topic=topic,
date_range=date_range,
granularity=granularity
)
elif analysis_type == "lifecycle":
return self.analyze_topic_lifecycle(
topic=topic,
date_range=date_range
)
elif analysis_type == "viral":
# viral模式不需要topic参数,使用通用检测
return self.detect_viral_topics(
threshold=threshold,
time_window=time_window
)
else: # predict
# predict模式不需要topic参数,使用通用预测
return self.predict_trending_topics(
lookahead_hours=lookahead_hours,
confidence_threshold=confidence_threshold
)
except MCPError as e:
return {
"success": False,
"error": e.to_dict()
}
except Exception as e:
return {
"success": False,
"error": {
"code": "INTERNAL_ERROR",
"message": str(e)
}
}
def get_topic_trend_analysis(
self,
topic: str,
date_range: Optional[Dict[str, str]] = None,
granularity: str = "day"
) -> Dict:
"""
热度趋势分析 - 追踪特定话题的热度变化趋势
Args:
topic: 话题关键词
date_range: 日期范围(可选)
- **格式**: {"start": "YYYY-MM-DD", "end": "YYYY-MM-DD"}
- **默认**: 不指定时默认分析最近7天
granularity: 时间粒度,仅支持 day(天)
Returns:
趋势分析结果字典
Examples:
用户询问示例:
- "帮我分析一下'人工智能'这个话题最近一周的热度趋势"
- "查看'比特币'过去一周的热度变化"
- "看看'iPhone'最近7天的趋势如何"
- "分析'特斯拉'最近一个月的热度趋势"
- "查看'ChatGPT'2024年12月的趋势变化"
代码调用示例:
>>> tools = AnalyticsTools()
>>> # 分析7天趋势(假设今天是 2025-11-17)
>>> result = tools.get_topic_trend_analysis(
... topic="人工智能",
... date_range={"start": "2025-11-11", "end": "2025-11-17"},
... granularity="day"
... )
>>> # 分析历史月份趋势
>>> result = tools.get_topic_trend_analysis(
... topic="特斯拉",
... date_range={"start": "2024-12-01", "end": "2024-12-31"},
... granularity="day"
... )
>>> print(result['trend_data'])
"""
try:
# 验证参数
topic = validate_keyword(topic)
# 验证粒度参数(只支持day)
if granularity != "day":
from ..utils.errors import InvalidParameterError
raise InvalidParameterError(
f"不支持的粒度参数: {granularity}",
suggestion="当前仅支持 'day' 粒度,因为底层数据按天聚合"
)
# 处理日期范围(不指定时默认最近7天)
if date_range:
from ..utils.validators import validate_date_range
date_range_tuple = validate_date_range(date_range)
start_date, end_date = date_range_tuple
else:
# 默认最近7天
end_date = datetime.now()
start_date = end_date - timedelta(days=6)
# 收集趋势数据
trend_data = []
current_date = start_date
while current_date <= end_date:
try:
all_titles, _, _ = self.data_service.parser.read_all_titles_for_date(
date=current_date
)
# 统计该时间点的话题出现次数
count = 0
matched_titles = []
for _, titles in all_titles.items():
for title in titles.keys():
if topic.lower() in title.lower():
count += 1
matched_titles.append(title)
trend_data.append({
"date": current_date.strftime("%Y-%m-%d"),
"count": count,
"sample_titles": matched_titles[:3] # 只保留前3个样本
})
except DataNotFoundError:
trend_data.append({
"date": current_date.strftime("%Y-%m-%d"),
"count": 0,
"sample_titles": []
})
# 按天增加时间
current_date += timedelta(days=1)
# 计算趋势指标
counts = [item["count"] for item in trend_data]
total_days = (end_date - start_date).days + 1
if len(counts) >= 2:
# 计算涨跌幅度
first_non_zero = next((c for c in counts if c > 0), 0)
last_count = counts[-1]
if first_non_zero > 0:
change_rate = ((last_count - first_non_zero) / first_non_zero) * 100
else:
change_rate = 0
# 找到峰值时间
max_count = max(counts)
peak_index = counts.index(max_count)
peak_time = trend_data[peak_index]["date"]
else:
change_rate = 0
peak_time = None
max_count = 0
return {
"success": True,
"topic": topic,
"date_range": {
"start": start_date.strftime("%Y-%m-%d"),
"end": end_date.strftime("%Y-%m-%d"),
"total_days": total_days
},
"granularity": granularity,
"trend_data": trend_data,
"statistics": {
"total_mentions": sum(counts),
"average_mentions": round(sum(counts) / len(counts), 2) if counts else 0,
"peak_count": max_count,
"peak_time": peak_time,
"change_rate": round(change_rate, 2)
},
"trend_direction": "上升" if change_rate > 10 else "下降" if change_rate < -10 else "稳定"
}
except MCPError as e:
return {
"success": False,
"error": e.to_dict()
}
except Exception as e:
return {
"success": False,
"error": {
"code": "INTERNAL_ERROR",
"message": str(e)
}
}
def compare_platforms(
self,
topic: Optional[str] = None,
date_range: Optional[Dict[str, str]] = None
) -> Dict:
"""
平台对比分析 - 对比不同平台对同一话题的关注度
Args:
topic: 话题关键词(可选,不指定则对比整体活跃度)
date_range: 日期范围,格式: {"start": "YYYY-MM-DD", "end": "YYYY-MM-DD"}
Returns:
平台对比分析结果
Examples:
用户询问示例:
- "对比一下各个平台对'人工智能'话题的关注度"
- "看看知乎和微博哪个平台更关注科技新闻"
- "分析各平台今天的热点分布"
代码调用示例:
>>> # 对比各平台(假设今天是 2025-11-17)
>>> result = tools.compare_platforms(
... topic="人工智能",
... date_range={"start": "2025-11-08", "end": "2025-11-17"}
... )
>>> print(result['platform_stats'])
"""
try:
# 参数验证
if topic:
topic = validate_keyword(topic)
date_range_tuple = validate_date_range(date_range)
# 确定日期范围
if date_range_tuple:
start_date, end_date = date_range_tuple
else:
start_date = end_date = datetime.now()
# 收集各平台数据
platform_stats = defaultdict(lambda: {
"total_news": 0,
"topic_mentions": 0,
"unique_titles": set(),
"top_keywords": Counter()
})
# 遍历日期范围
current_date = start_date
while current_date <= end_date:
try:
all_titles, id_to_name, _ = self.data_service.parser.read_all_titles_for_date(
date=current_date
)
for platform_id, titles in all_titles.items():
platform_name = id_to_name.get(platform_id, platform_id)
for title in titles.keys():
platform_stats[platform_name]["total_news"] += 1
platform_stats[platform_name]["unique_titles"].add(title)
# 如果指定了话题,统计包含话题的新闻
if topic and topic.lower() in title.lower():
platform_stats[platform_name]["topic_mentions"] += 1
# 提取关键词(简单分词)
keywords = self._extract_keywords(title)
platform_stats[platform_name]["top_keywords"].update(keywords)
except DataNotFoundError:
pass
current_date += timedelta(days=1)
# 转换为可序列化的格式
result_stats = {}
for platform, stats in platform_stats.items():
coverage_rate = 0
if stats["total_news"] > 0:
coverage_rate = (stats["topic_mentions"] / stats["total_news"]) * 100
result_stats[platform] = {
"total_news": stats["total_news"],
"topic_mentions": stats["topic_mentions"],
"unique_titles": len(stats["unique_titles"]),
"coverage_rate": round(coverage_rate, 2),
"top_keywords": [
{"keyword": k, "count": v}
for k, v in stats["top_keywords"].most_common(5)
]
}
# 找出各平台独有的热点
unique_topics = self._find_unique_topics(platform_stats)
return {
"success": True,
"topic": topic,
"date_range": {
"start": start_date.strftime("%Y-%m-%d"),
"end": end_date.strftime("%Y-%m-%d")
},
"platform_stats": result_stats,
"unique_topics": unique_topics,
"total_platforms": len(result_stats)
}
except MCPError as e:
return {
"success": False,
"error": e.to_dict()
}
except Exception as e:
return {
"success": False,
"error": {
"code": "INTERNAL_ERROR",
"message": str(e)
}
}
def analyze_keyword_cooccurrence(
self,
min_frequency: int = 3,
top_n: int = 20
) -> Dict:
"""
关键词共现分析 - 分析哪些关键词经常同时出现
Args:
min_frequency: 最小共现频次
top_n: 返回TOP N关键词对
Returns:
关键词共现分析结果
Examples:
用户询问示例:
- "分析一下哪些关键词经常一起出现"
- "看看'人工智能'经常和哪些词一起出现"
- "找出今天新闻中的关键词关联"
代码调用示例:
>>> tools = AnalyticsTools()
>>> result = tools.analyze_keyword_cooccurrence(
... min_frequency=5,
... top_n=15
... )
>>> print(result['cooccurrence_pairs'])
"""
try:
# 参数验证
min_frequency = validate_limit(min_frequency, default=3, max_limit=100)
top_n = validate_top_n(top_n, default=20)
# 读取今天的数据
all_titles, _, _ = self.data_service.parser.read_all_titles_for_date()
# 关键词共现统计
cooccurrence = Counter()
keyword_titles = defaultdict(list)
for platform_id, titles in all_titles.items():
for title in titles.keys():
# 提取关键词
keywords = self._extract_keywords(title)
# 记录每个关键词出现的标题
for kw in keywords:
keyword_titles[kw].append(title)
# 计算两两共现
if len(keywords) >= 2:
for i, kw1 in enumerate(keywords):
for kw2 in keywords[i+1:]:
# 统一排序,避免重复
pair = tuple(sorted([kw1, kw2]))
cooccurrence[pair] += 1
# 过滤低频共现
filtered_pairs = [
(pair, count) for pair, count in cooccurrence.items()
if count >= min_frequency
]
# 排序并取TOP N
top_pairs = sorted(filtered_pairs, key=lambda x: x[1], reverse=True)[:top_n]
# 构建结果
result_pairs = []
for (kw1, kw2), count in top_pairs:
# 找出同时包含两个关键词的标题样本
titles_with_both = [
title for title in keyword_titles[kw1]
if kw2 in self._extract_keywords(title)
]
result_pairs.append({
"keyword1": kw1,
"keyword2": kw2,
"cooccurrence_count": count,
"sample_titles": titles_with_both[:3]
})
return {
"success": True,
"cooccurrence_pairs": result_pairs,
"total_pairs": len(result_pairs),
"min_frequency": min_frequency,
"generated_at": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
}
except MCPError as e:
return {
"success": False,
"error": e.to_dict()
}
except Exception as e:
return {
"success": False,
"error": {
"code": "INTERNAL_ERROR",
"message": str(e)
}
}
def analyze_sentiment(
self,
topic: Optional[str] = None,
platforms: Optional[List[str]] = None,
date_range: Optional[Dict[str, str]] = None,
limit: int = 50,
sort_by_weight: bool = True,
include_url: bool = False
) -> Dict:
"""
情感倾向分析 - 生成用于 AI 情感分析的结构化提示词
本工具收集新闻数据并生成优化的 AI 提示词,你可以将其发送给 AI 进行深度情感分析。
Args:
topic: 话题关键词(可选),只分析包含该关键词的新闻
platforms: 平台过滤列表(可选),如 ['zhihu', 'weibo']
date_range: 日期范围(可选),格式: {"start": "YYYY-MM-DD", "end": "YYYY-MM-DD"}
不指定则默认查询今天的数据
limit: 返回新闻数量限制,默认50,最大100
sort_by_weight: 是否按权重排序,默认True(推荐)
include_url: 是否包含URL链接,默认False(节省token)
Returns:
包含 AI 提示词和新闻数据的结构化结果
Examples:
用户询问示例:
- "分析一下今天新闻的情感倾向"
- "看看'特斯拉'相关新闻是正面还是负面的"
- "分析各平台对'人工智能'的情感态度"
- "看看'特斯拉'相关新闻是正面还是负面的,请选择一周内的前10条新闻来分析"
代码调用示例:
>>> tools = AnalyticsTools()
>>> # 分析今天的特斯拉新闻,返回前10条
>>> result = tools.analyze_sentiment(
... topic="特斯拉",
... limit=10
... )
>>> # 分析一周内的特斯拉新闻(假设今天是 2025-11-17)
>>> result = tools.analyze_sentiment(
... topic="特斯拉",
... date_range={"start": "2025-11-11", "end": "2025-11-17"},
... limit=10
... )
>>> print(result['ai_prompt']) # 获取生成的提示词
"""
try:
# 参数验证
if topic:
topic = validate_keyword(topic)
platforms = validate_platforms(platforms)
limit = validate_limit(limit, default=50)
# 处理日期范围
if date_range:
date_range_tuple = validate_date_range(date_range)
start_date, end_date = date_range_tuple
else:
# 默认今天
start_date = end_date = datetime.now()
# 收集新闻数据(支持多天)
all_news_items = []
current_date = start_date
while current_date <= end_date:
try:
all_titles, id_to_name, _ = self.data_service.parser.read_all_titles_for_date(
date=current_date,
platform_ids=platforms
)
# 收集该日期的新闻
for platform_id, titles in all_titles.items():
platform_name = id_to_name.get(platform_id, platform_id)
for title, info in titles.items():
# 如果指定了话题,只收集包含话题的标题
if topic and topic.lower() not in title.lower():
continue
news_item = {
"platform": platform_name,
"title": title,
"ranks": info.get("ranks", []),
"count": len(info.get("ranks", [])),
"date": current_date.strftime("%Y-%m-%d")
}
# 条件性添加 URL 字段
if include_url:
news_item["url"] = info.get("url", "")
news_item["mobileUrl"] = info.get("mobileUrl", "")
all_news_items.append(news_item)
except DataNotFoundError:
# 该日期没有数据,继续下一天
pass
# 下一天
current_date += timedelta(days=1)
if not all_news_items:
time_desc = "今天" if start_date == end_date else f"{start_date.strftime('%Y-%m-%d')} 至 {end_date.strftime('%Y-%m-%d')}"
raise DataNotFoundError(
f"未找到相关新闻({time_desc})",
suggestion="请尝试其他话题、日期范围或平台"
)
# 去重(同一标题只保留一次)
unique_news = {}
for item in all_news_items:
key = f"{item['platform']}::{item['title']}"
if key not in unique_news:
unique_news[key] = item
else:
# 合并 ranks(如果同一新闻在多天出现)
existing = unique_news[key]
existing["ranks"].extend(item["ranks"])
existing["count"] = len(existing["ranks"])
deduplicated_news = list(unique_news.values())
# 按权重排序(如果启用)
if sort_by_weight:
deduplicated_news.sort(
key=lambda x: calculate_news_weight(x),
reverse=True
)
# 限制返回数量
selected_news = deduplicated_news[:limit]
# 生成 AI 提示词
ai_prompt = self._create_sentiment_analysis_prompt(
news_data=selected_news,
topic=topic
)
# 构建时间范围描述
if start_date == end_date:
time_range_desc = start_date.strftime("%Y-%m-%d")
else:
time_range_desc = f"{start_date.strftime('%Y-%m-%d')} 至 {end_date.strftime('%Y-%m-%d')}"
result = {
"success": True,
"method": "ai_prompt_generation",
"summary": {
"total_found": len(deduplicated_news),
"returned_count": len(selected_news),
"requested_limit": limit,
"duplicates_removed": len(all_news_items) - len(deduplicated_news),
"topic": topic,
"time_range": time_range_desc,
"platforms": list(set(item["platform"] for item in selected_news)),
"sorted_by_weight": sort_by_weight
},
"ai_prompt": ai_prompt,
"news_sample": selected_news,
"usage_note": "请将 ai_prompt 字段的内容发送给 AI 进行情感分析"
}
# 如果返回数量少于请求数量,增加提示
if len(selected_news) < limit and len(deduplicated_news) >= limit:
result["note"] = "返回数量少于请求数量是因为去重逻辑(同一标题在不同平台只保留一次)"
elif len(deduplicated_news) < limit:
result["note"] = f"在指定时间范围内仅找到 {len(deduplicated_news)} 条匹配的新闻"
return result
except MCPError as e:
return {
"success": False,
"error": e.to_dict()
}
except Exception as e:
return {
"success": False,
"error": {
"code": "INTERNAL_ERROR",
"message": str(e)
}
}
def _create_sentiment_analysis_prompt(
self,
news_data: List[Dict],
topic: Optional[str]
) -> str:
"""
创建情感分析的 AI 提示词
Args:
news_data: 新闻数据列表(已排序和限制数量)
topic: 话题关键词
Returns:
格式化的 AI 提示词
"""
# 按平台分组
platform_news = defaultdict(list)
for item in news_data:
platform_news[item["platform"]].append({
"title": item["title"],
"date": item.get("date", "")
})
# 构建提示词
prompt_parts = []
# 1. 任务说明
if topic:
prompt_parts.append(f"请分析以下关于「{topic}」的新闻标题的情感倾向。")
else:
prompt_parts.append("请分析以下新闻标题的情感倾向。")
prompt_parts.append("")
prompt_parts.append("分析要求:")
prompt_parts.append("1. 识别每条新闻的情感倾向(正面/负面/中性)")
prompt_parts.append("2. 统计各情感类别的数量和百分比")
prompt_parts.append("3. 分析不同平台的情感差异")
prompt_parts.append("4. 总结整体情感趋势")
prompt_parts.append("5. 列举典型的正面和负面新闻样本")
prompt_parts.append("")
# 2. 数据概览
prompt_parts.append(f"数据概览:")
prompt_parts.append(f"- 总新闻数:{len(news_data)}")
prompt_parts.append(f"- 覆盖平台:{len(platform_news)}")
# 时间范围
dates = set(item.get("date", "") for item in news_data if item.get("date"))
if dates:
date_list = sorted(dates)
if len(date_list) == 1:
prompt_parts.append(f"- 时间范围:{date_list[0]}")
else:
prompt_parts.append(f"- 时间范围:{date_list[0]} 至 {date_list[-1]}")
prompt_parts.append("")
# 3. 按平台展示新闻
prompt_parts.append("新闻列表(按平台分类,已按重要性排序):")
prompt_parts.append("")
for platform, items in sorted(platform_news.items()):
prompt_parts.append(f"【{platform}】({len(items)} 条)")
for i, item in enumerate(items, 1):
title = item["title"]
date_str = f" [{item['date']}]" if item.get("date") else ""
prompt_parts.append(f"{i}. {title}{date_str}")
prompt_parts.append("")
# 4. 输出格式说明
prompt_parts.append("请按以下格式输出分析结果:")
prompt_parts.append("")
prompt_parts.append("## 情感分布统计")
prompt_parts.append("- 正面:XX条 (XX%)")
prompt_parts.append("- 负面:XX条 (XX%)")
prompt_parts.append("- 中性:XX条 (XX%)")
prompt_parts.append("")
prompt_parts.append("## 平台情感对比")
prompt_parts.append("[各平台的情感倾向差异]")
prompt_parts.append("")
prompt_parts.append("## 整体情感趋势")
prompt_parts.append("[总体分析和关键发现]")
prompt_parts.append("")
prompt_parts.append("## 典型样本")
prompt_parts.append("正面新闻样本:")
prompt_parts.append("[列举3-5条]")
prompt_parts.append("")
prompt_parts.append("负面新闻样本:")
prompt_parts.append("[列举3-5条]")
return "\n".join(prompt_parts)
def find_similar_news(
self,
reference_title: str,
threshold: float = 0.6,
limit: int = 50,
include_url: bool = False
) -> Dict:
"""
相似新闻查找 - 基于标题相似度查找相关新闻
Args:
reference_title: 参考标题
threshold: 相似度阈值(0-1之间)
limit: 返回条数限制,默认50
include_url: 是否包含URL链接,默认False(节省token)
Returns:
相似新闻列表
Examples:
用户询问示例:
- "找出和'特斯拉降价'相似的新闻"
- "查找关于iPhone发布的类似报道"
- "看看有没有和这条新闻相似的报道"
代码调用示例:
>>> tools = AnalyticsTools()
>>> result = tools.find_similar_news(
... reference_title="特斯拉宣布降价",
... threshold=0.6,
... limit=10
... )
>>> print(result['similar_news'])
"""
try:
# 参数验证
reference_title = validate_keyword(reference_title)
if not 0 <= threshold <= 1:
raise InvalidParameterError(
"threshold 必须在 0 到 1 之间",
suggestion="推荐值:0.5-0.8"
)
limit = validate_limit(limit, default=50)
# 读取数据
all_titles, id_to_name, _ = self.data_service.parser.read_all_titles_for_date()
# 计算相似度
similar_items = []
for platform_id, titles in all_titles.items():
platform_name = id_to_name.get(platform_id, platform_id)
for title, info in titles.items():
if title == reference_title:
continue
# 计算相似度
similarity = self._calculate_similarity(reference_title, title)
if similarity >= threshold:
news_item = {
"title": title,
"platform": platform_id,
"platform_name": platform_name,
"similarity": round(similarity, 3),
"rank": info["ranks"][0] if info["ranks"] else 0
}
# 条件性添加 URL 字段
if include_url:
news_item["url"] = info.get("url", "")
similar_items.append(news_item)
# 按相似度排序
similar_items.sort(key=lambda x: x["similarity"], reverse=True)
# 限制数量
result_items = similar_items[:limit]
if not result_items:
raise DataNotFoundError(
f"未找到相似度超过 {threshold} 的新闻",
suggestion="请降低相似度阈值或尝试其他标题"
)
result = {
"success": True,
"summary": {
"total_found": len(similar_items),
"returned_count": len(result_items),
"requested_limit": limit,
"threshold": threshold,
"reference_title": reference_title
},
"similar_news": result_items
}
if len(similar_items) < limit:
result["note"] = f"相似度阈值 {threshold} 下仅找到 {len(similar_items)} 条相似新闻"
return result
except MCPError as e:
return {
"success": False,
"error": e.to_dict()
}
except Exception as e:
return {
"success": False,
"error": {
"code": "INTERNAL_ERROR",
"message": str(e)
}
}
def search_by_entity(
self,
entity: str,
entity_type: Optional[str] = None,
limit: int = 50,
sort_by_weight: bool = True
) -> Dict:
"""
实体识别搜索 - 搜索包含特定人物/地点/机构的新闻
Args:
entity: 实体名称
entity_type: 实体类型(person/location/organization),可选
limit: 返回条数限制,默认50,最大200
sort_by_weight: 是否按权重排序,默认True
Returns:
实体相关新闻列表
Examples:
用户询问示例:
- "搜索马斯克相关的新闻"
- "查找关于特斯拉公司的报道,返回前20条"
- "看看北京有什么新闻"
代码调用示例:
>>> tools = AnalyticsTools()
>>> result = tools.search_by_entity(
... entity="马斯克",
... entity_type="person",
... limit=20
... )
>>> print(result['related_news'])
"""
try:
# 参数验证
entity = validate_keyword(entity)
limit = validate_limit(limit, default=50)
if entity_type and entity_type not in ["person", "location", "organization"]:
raise InvalidParameterError(
f"无效的实体类型: {entity_type}",
suggestion="支持的类型: person, location, organization"
)
# 读取数据
all_titles, id_to_name, _ = self.data_service.parser.read_all_titles_for_date()
# 搜索包含实体的新闻
related_news = []
entity_context = Counter() # 统计实体周边的词
for platform_id, titles in all_titles.items():
platform_name = id_to_name.get(platform_id, platform_id)
for title, info in titles.items():
if entity in title:
url = info.get("url", "")
mobile_url = info.get("mobileUrl", "")
ranks = info.get("ranks", [])
count = len(ranks)
related_news.append({
"title": title,
"platform": platform_id,
"platform_name": platform_name,
"url": url,
"mobileUrl": mobile_url,
"ranks": ranks,
"count": count,
"rank": ranks[0] if ranks else 999
})
# 提取实体周边的关键词
keywords = self._extract_keywords(title)
entity_context.update(keywords)
if not related_news:
raise DataNotFoundError(
f"未找到包含实体 '{entity}' 的新闻",
suggestion="请尝试其他实体名称"
)
# 移除实体本身
if entity in entity_context:
del entity_context[entity]
# 按权重排序(如果启用)
if sort_by_weight:
related_news.sort(
key=lambda x: calculate_news_weight(x),
reverse=True
)
else:
# 按排名排序
related_news.sort(key=lambda x: x["rank"])
# 限制返回数量
result_news = related_news[:limit]
return {
"success": True,
"entity": entity,
"entity_type": entity_type or "auto",
"related_news": result_news,
"total_found": len(related_news),
"returned_count": len(result_news),
"sorted_by_weight": sort_by_weight,
"related_keywords": [
{"keyword": k, "count": v}
for k, v in entity_context.most_common(10)
]
}
except MCPError as e:
return {
"success": False,
"error": e.to_dict()
}
except Exception as e:
return {
"success": False,
"error": {
"code": "INTERNAL_ERROR",
"message": str(e)
}
}
def generate_summary_report(
self,
report_type: str = "daily",
date_range: Optional[Dict[str, str]] = None
) -> Dict:
"""
每日/每周摘要生成器 - 自动生成热点摘要报告
Args:
report_type: 报告类型(daily/weekly)
date_range: 自定义日期范围(可选)
Returns:
Markdown格式的摘要报告
Examples:
用户询问示例:
- "生成今天的新闻摘要报告"
- "给我一份本周的热点总结"
- "生成过去7天的新闻分析报告"
代码调用示例:
>>> tools = AnalyticsTools()
>>> result = tools.generate_summary_report(
... report_type="daily"
... )
>>> print(result['markdown_report'])
"""
try:
# 参数验证
if report_type not in ["daily", "weekly"]:
raise InvalidParameterError(
f"无效的报告类型: {report_type}",
suggestion="支持的类型: daily, weekly"
)
# 确定日期范围
if date_range:
date_range_tuple = validate_date_range(date_range)
start_date, end_date = date_range_tuple
else:
if report_type == "daily":
start_date = end_date = datetime.now()
else: # weekly
end_date = datetime.now()
start_date = end_date - timedelta(days=6)
# 收集数据
all_keywords = Counter()
all_platforms_news = defaultdict(int)
all_titles_list = []
current_date = start_date
while current_date <= end_date:
try:
all_titles, id_to_name, _ = self.data_service.parser.read_all_titles_for_date(
date=current_date
)
for platform_id, titles in all_titles.items():
platform_name = id_to_name.get(platform_id, platform_id)
all_platforms_news[platform_name] += len(titles)
for title in titles.keys():
all_titles_list.append({
"title": title,
"platform": platform_name,
"date": current_date.strftime("%Y-%m-%d")
})
# 提取关键词
keywords = self._extract_keywords(title)
all_keywords.update(keywords)
except DataNotFoundError:
pass
current_date += timedelta(days=1)
# 生成报告
report_title = f"{'每日' if report_type == 'daily' else '每周'}新闻热点摘要"
date_str = f"{start_date.strftime('%Y-%m-%d')}" if report_type == "daily" else f"{start_date.strftime('%Y-%m-%d')} 至 {end_date.strftime('%Y-%m-%d')}"
# 构建Markdown报告
markdown = f"""# {report_title}
**报告日期**: {date_str}
**生成时间**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
---
## 📊 数据概览
- **总新闻数**: {len(all_titles_list)}
- **覆盖平台**: {len(all_platforms_news)}
- **热门关键词数**: {len(all_keywords)}
## 🔥 TOP 10 热门话题
"""
# 添加TOP 10关键词
for i, (keyword, count) in enumerate(all_keywords.most_common(10), 1):
markdown += f"{i}. **{keyword}** - 出现 {count} 次\n"
# 平台分析
markdown += "\n## 📱 平台活跃度\n\n"
sorted_platforms = sorted(all_platforms_news.items(), key=lambda x: x[1], reverse=True)
for platform, count in sorted_platforms:
markdown += f"- **{platform}**: {count} 条新闻\n"
# 趋势变化(如果是周报)
if report_type == "weekly":
markdown += "\n## 📈 趋势分析\n\n"
markdown += "本周热度持续的话题(样本数据):\n\n"
# 简单的趋势分析
top_keywords = [kw for kw, _ in all_keywords.most_common(5)]
for keyword in top_keywords:
markdown += f"- **{keyword}**: 持续热门\n"
# 添加样本新闻(按权重选择,确保确定性)
markdown += "\n## 📰 精选新闻样本\n\n"
# 确定性选取:按标题的权重排序,取前5条
# 这样相同输入总是返回相同结果
if all_titles_list:
# 计算每条新闻的权重分数(基于关键词出现次数)
news_with_scores = []
for news in all_titles_list:
# 简单权重:统计包含TOP关键词的次数
score = 0
title_lower = news['title'].lower()
for keyword, count in all_keywords.most_common(10):
if keyword.lower() in title_lower:
score += count
news_with_scores.append((news, score))
# 按权重降序排序,权重相同则按标题字母顺序(确保确定性)
news_with_scores.sort(key=lambda x: (-x[1], x[0]['title']))
# 取前5条
sample_news = [item[0] for item in news_with_scores[:5]]
for news in sample_news:
markdown += f"- [{news['platform']}] {news['title']}\n"
markdown += "\n---\n\n*本报告由 TrendRadar MCP 自动生成*\n"
return {
"success": True,
"report_type": report_type,
"date_range": {
"start": start_date.strftime("%Y-%m-%d"),
"end": end_date.strftime("%Y-%m-%d")
},
"markdown_report": markdown,
"statistics": {
"total_news": len(all_titles_list),
"platforms_count": len(all_platforms_news),
"keywords_count": len(all_keywords),
"top_keyword": all_keywords.most_common(1)[0] if all_keywords else None
}
}
except MCPError as e:
return {
"success": False,
"error": e.to_dict()
}
except Exception as e:
return {
"success": False,
"error": {
"code": "INTERNAL_ERROR",
"message": str(e)
}
}
def get_platform_activity_stats(
self,
date_range: Optional[Dict[str, str]] = None
) -> Dict:
"""
平台活跃度统计 - 统计各平台的发布频率和活跃时间段
Args:
date_range: 日期范围(可选)
Returns:
平台活跃度统计结果
Examples:
用户询问示例:
- "统计各平台今天的活跃度"
- "看看哪个平台更新最频繁"
- "分析各平台的发布时间规律"
代码调用示例:
>>> # 查看各平台活跃度(假设今天是 2025-11-17)
>>> result = tools.get_platform_activity_stats(
... date_range={"start": "2025-11-08", "end": "2025-11-17"}
... )
>>> print(result['platform_activity'])
"""
try:
# 参数验证
date_range_tuple = validate_date_range(date_range)
# 确定日期范围
if date_range_tuple:
start_date, end_date = date_range_tuple
else:
start_date = end_date = datetime.now()
# 统计各平台活跃度
platform_activity = defaultdict(lambda: {
"total_updates": 0,
"days_active": set(),
"news_count": 0,
"hourly_distribution": Counter()
})
# 遍历日期范围
current_date = start_date
while current_date <= end_date:
try:
all_titles, id_to_name, timestamps = self.data_service.parser.read_all_titles_for_date(
date=current_date
)
for platform_id, titles in all_titles.items():
platform_name = id_to_name.get(platform_id, platform_id)
platform_activity[platform_name]["news_count"] += len(titles)
platform_activity[platform_name]["days_active"].add(current_date.strftime("%Y-%m-%d"))
# 统计更新次数(基于文件数量)
platform_activity[platform_name]["total_updates"] += len(timestamps)
# 统计时间分布(基于文件名中的时间)
for filename in timestamps.keys():
# 解析文件名中的小时(格式:HHMM.txt)
match = re.match(r'(\d{2})(\d{2})\.txt', filename)
if match:
hour = int(match.group(1))
platform_activity[platform_name]["hourly_distribution"][hour] += 1
except DataNotFoundError:
pass
current_date += timedelta(days=1)
# 转换为可序列化的格式
result_activity = {}
for platform, stats in platform_activity.items():
days_count = len(stats["days_active"])
avg_news_per_day = stats["news_count"] / days_count if days_count > 0 else 0
# 找出最活跃的时间段
most_active_hours = stats["hourly_distribution"].most_common(3)
result_activity[platform] = {
"total_updates": stats["total_updates"],
"news_count": stats["news_count"],
"days_active": days_count,
"avg_news_per_day": round(avg_news_per_day, 2),
"most_active_hours": [
{"hour": f"{hour:02d}:00", "count": count}
for hour, count in most_active_hours
],
"activity_score": round(stats["news_count"] / max(days_count, 1), 2)
}
# 按活跃度排序
sorted_platforms = sorted(
result_activity.items(),
key=lambda x: x[1]["activity_score"],
reverse=True
)
return {
"success": True,
"date_range": {
"start": start_date.strftime("%Y-%m-%d"),
"end": end_date.strftime("%Y-%m-%d")
},
"platform_activity": dict(sorted_platforms),
"most_active_platform": sorted_platforms[0][0] if sorted_platforms else None,
"total_platforms": len(result_activity)
}
except MCPError as e:
return {
"success": False,
"error": e.to_dict()
}
except Exception as e:
return {
"success": False,
"error": {
"code": "INTERNAL_ERROR",
"message": str(e)
}
}
def analyze_topic_lifecycle(
self,
topic: str,
date_range: Optional[Dict[str, str]] = None
) -> Dict:
"""
话题生命周期分析 - 追踪话题从出现到消失的完整周期
Args:
topic: 话题关键词
date_range: 日期范围(可选)
- **格式**: {"start": "YYYY-MM-DD", "end": "YYYY-MM-DD"}
- **默认**: 不指定时默认分析最近7天
Returns:
话题生命周期分析结果
Examples:
用户询问示例:
- "分析'人工智能'这个话题的生命周期"
- "看看'iPhone'话题是昙花一现还是持续热点"
- "追踪'比特币'话题的热度变化"
代码调用示例:
>>> # 分析话题生命周期(假设今天是 2025-11-17)
>>> result = tools.analyze_topic_lifecycle(
... topic="人工智能",
... date_range={"start": "2025-10-19", "end": "2025-11-17"}
... )
>>> print(result['lifecycle_stage'])
"""
try:
# 参数验证
topic = validate_keyword(topic)
# 处理日期范围(不指定时默认最近7天)
if date_range:
from ..utils.validators import validate_date_range
date_range_tuple = validate_date_range(date_range)
start_date, end_date = date_range_tuple
else:
# 默认最近7天
end_date = datetime.now()
start_date = end_date - timedelta(days=6)
# 收集话题历史数据
lifecycle_data = []
current_date = start_date
while current_date <= end_date:
try:
all_titles, _, _ = self.data_service.parser.read_all_titles_for_date(
date=current_date
)
# 统计该日的话题出现次数
count = 0
for _, titles in all_titles.items():
for title in titles.keys():
if topic.lower() in title.lower():
count += 1
lifecycle_data.append({
"date": current_date.strftime("%Y-%m-%d"),
"count": count
})
except DataNotFoundError:
lifecycle_data.append({
"date": current_date.strftime("%Y-%m-%d"),
"count": 0
})
current_date += timedelta(days=1)
# 计算分析天数
total_days = (end_date - start_date).days + 1
# 分析生命周期阶段
counts = [item["count"] for item in lifecycle_data]
if not any(counts):
time_desc = f"{start_date.strftime('%Y-%m-%d')} 至 {end_date.strftime('%Y-%m-%d')}"
raise DataNotFoundError(
f"在 {time_desc} 内未找到话题 '{topic}'",
suggestion="请尝试其他话题或扩大时间范围"
)
# 找到首次出现和最后出现
first_appearance = next((item["date"] for item in lifecycle_data if item["count"] > 0), None)
last_appearance = next((item["date"] for item in reversed(lifecycle_data) if item["count"] > 0), None)
# 计算峰值
max_count = max(counts)
peak_index = counts.index(max_count)
peak_date = lifecycle_data[peak_index]["date"]
# 计算平均值和标准差(简单实现)
non_zero_counts = [c for c in counts if c > 0]
avg_count = sum(non_zero_counts) / len(non_zero_counts) if non_zero_counts else 0
# 判断生命周期阶段
recent_counts = counts[-3:] # 最近3天
early_counts = counts[:3] # 前3天
if sum(recent_counts) > sum(early_counts):
lifecycle_stage = "上升期"
elif sum(recent_counts) < sum(early_counts) * 0.5:
lifecycle_stage = "衰退期"
elif max_count in recent_counts:
lifecycle_stage = "爆发期"
else:
lifecycle_stage = "稳定期"
# 分类:昙花一现 vs 持续热点
active_days = sum(1 for c in counts if c > 0)
if active_days <= 2 and max_count > avg_count * 2:
topic_type = "昙花一现"
elif active_days >= total_days * 0.6:
topic_type = "持续热点"
else:
topic_type = "周期性热点"
return {
"success": True,
"topic": topic,
"date_range": {
"start": start_date.strftime("%Y-%m-%d"),
"end": end_date.strftime("%Y-%m-%d"),
"total_days": total_days
},
"lifecycle_data": lifecycle_data,
"analysis": {
"first_appearance": first_appearance,
"last_appearance": last_appearance,
"peak_date": peak_date,
"peak_count": max_count,
"active_days": active_days,
"avg_daily_mentions": round(avg_count, 2),
"lifecycle_stage": lifecycle_stage,
"topic_type": topic_type
}
}
except MCPError as e:
return {
"success": False,
"error": e.to_dict()
}
except Exception as e:
return {
"success": False,
"error": {
"code": "INTERNAL_ERROR",
"message": str(e)
}
}
def detect_viral_topics(
self,
threshold: float = 3.0,
time_window: int = 24
) -> Dict:
"""
异常热度检测 - 自动识别突然爆火的话题
Args:
threshold: 热度突增倍数阈值
time_window: 检测时间窗口(小时)
Returns:
爆火话题列表
Examples:
用户询问示例:
- "检测今天有哪些突然爆火的话题"
- "看看有没有热度异常的新闻"
- "预警可能的重大事件"
代码调用示例:
>>> tools = AnalyticsTools()
>>> result = tools.detect_viral_topics(
... threshold=3.0,
... time_window=24
... )
>>> print(result['viral_topics'])
"""
try:
# 参数验证
if threshold < 1.0:
raise InvalidParameterError(
"threshold 必须大于等于 1.0",
suggestion="推荐值:2.0-5.0"
)
time_window = validate_limit(time_window, default=24, max_limit=72)
# 读取当前和之前的数据
current_all_titles, _, _ = self.data_service.parser.read_all_titles_for_date()
# 读取昨天的数据作为基准
yesterday = datetime.now() - timedelta(days=1)
try:
previous_all_titles, _, _ = self.data_service.parser.read_all_titles_for_date(
date=yesterday
)
except DataNotFoundError:
previous_all_titles = {}
# 统计当前的关键词频率
current_keywords = Counter()
current_keyword_titles = defaultdict(list)
for _, titles in current_all_titles.items():
for title in titles.keys():
keywords = self._extract_keywords(title)
current_keywords.update(keywords)
for kw in keywords:
current_keyword_titles[kw].append(title)
# 统计之前的关键词频率
previous_keywords = Counter()
for _, titles in previous_all_titles.items():
for title in titles.keys():
keywords = self._extract_keywords(title)
previous_keywords.update(keywords)
# 检测异常热度
viral_topics = []
for keyword, current_count in current_keywords.items():
previous_count = previous_keywords.get(keyword, 0)
# 计算增长倍数
if previous_count == 0:
# 新出现的话题
if current_count >= 5: # 至少出现5次才认为是爆火
growth_rate = float('inf')
is_viral = True
else:
continue
else:
growth_rate = current_count / previous_count
is_viral = growth_rate >= threshold
if is_viral:
viral_topics.append({
"keyword": keyword,
"current_count": current_count,
"previous_count": previous_count,
"growth_rate": round(growth_rate, 2) if growth_rate != float('inf') else "新话题",
"sample_titles": current_keyword_titles[keyword][:3],
"alert_level": "高" if growth_rate > threshold * 2 else "中"
})
# 按增长率排序
viral_topics.sort(
key=lambda x: x["current_count"] if x["growth_rate"] == "新话题" else x["growth_rate"],
reverse=True
)
if not viral_topics:
return {
"success": True,
"viral_topics": [],
"total_detected": 0,
"message": f"未检测到热度增长超过 {threshold} 倍的话题"
}
return {
"success": True,
"viral_topics": viral_topics,
"total_detected": len(viral_topics),
"threshold": threshold,
"time_window": time_window,
"detection_time": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
}
except MCPError as e:
return {
"success": False,
"error": e.to_dict()
}
except Exception as e:
return {
"success": False,
"error": {
"code": "INTERNAL_ERROR",
"message": str(e)
}
}
def predict_trending_topics(
self,
lookahead_hours: int = 6,
confidence_threshold: float = 0.7
) -> Dict:
"""
话题预测 - 基于历史数据预测未来可能的热点
Args:
lookahead_hours: 预测未来多少小时
confidence_threshold: 置信度阈值
Returns:
预测的潜力话题列表
Examples:
用户询问示例:
- "预测接下来6小时可能的热点话题"
- "有哪些话题可能会火起来"
- "早期发现潜力话题"
代码调用示例:
>>> tools = AnalyticsTools()
>>> result = tools.predict_trending_topics(
... lookahead_hours=6,
... confidence_threshold=0.7
... )
>>> print(result['predicted_topics'])
"""
try:
# 参数验证
lookahead_hours = validate_limit(lookahead_hours, default=6, max_limit=48)
if not 0 <= confidence_threshold <= 1:
raise InvalidParameterError(
"confidence_threshold 必须在 0 到 1 之间",
suggestion="推荐值:0.6-0.8"
)
# 收集最近3天的数据用于预测
keyword_trends = defaultdict(list)
for days_ago in range(3, 0, -1):
date = datetime.now() - timedelta(days=days_ago)
try:
all_titles, _, _ = self.data_service.parser.read_all_titles_for_date(
date=date
)
# 统计关键词
keywords_count = Counter()
for _, titles in all_titles.items():
for title in titles.keys():
keywords = self._extract_keywords(title)
keywords_count.update(keywords)
# 记录每个关键词的历史数据
for keyword, count in keywords_count.items():
keyword_trends[keyword].append(count)
except DataNotFoundError:
pass
# 添加今天的数据
try:
all_titles, _, _ = self.data_service.parser.read_all_titles_for_date()
keywords_count = Counter()
keyword_titles = defaultdict(list)
for _, titles in all_titles.items():
for title in titles.keys():
keywords = self._extract_keywords(title)
keywords_count.update(keywords)
for kw in keywords:
keyword_titles[kw].append(title)
for keyword, count in keywords_count.items():
keyword_trends[keyword].append(count)
except DataNotFoundError:
raise DataNotFoundError(
"未找到今天的数据",
suggestion="请等待爬虫任务完成"
)
# 预测潜力话题
predicted_topics = []
for keyword, trend_data in keyword_trends.items():
if len(trend_data) < 2:
continue
# 简单的线性趋势预测
# 计算增长率
recent_value = trend_data[-1]
previous_value = trend_data[-2] if len(trend_data) >= 2 else 0
if previous_value == 0:
if recent_value >= 3:
growth_rate = 1.0
else:
continue
else:
growth_rate = (recent_value - previous_value) / previous_value
# 判断是否是上升趋势
if growth_rate > 0.3: # 增长超过30%
# 计算置信度(基于趋势的稳定性)
if len(trend_data) >= 3:
# 检查是否连续增长
is_consistent = all(
trend_data[i] <= trend_data[i+1]
for i in range(len(trend_data)-1)
)
confidence = 0.9 if is_consistent else 0.7
else:
confidence = 0.6
if confidence >= confidence_threshold:
predicted_topics.append({
"keyword": keyword,
"current_count": recent_value,
"growth_rate": round(growth_rate * 100, 2),
"confidence": round(confidence, 2),
"trend_data": trend_data,
"prediction": "上升趋势,可能成为热点",
"sample_titles": keyword_titles.get(keyword, [])[:3]
})
# 按置信度和增长率排序
predicted_topics.sort(
key=lambda x: (x["confidence"], x["growth_rate"]),
reverse=True
)
return {
"success": True,
"predicted_topics": predicted_topics[:20], # 返回TOP 20
"total_predicted": len(predicted_topics),
"lookahead_hours": lookahead_hours,
"confidence_threshold": confidence_threshold,
"prediction_time": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"note": "预测基于历史趋势,实际结果可能有偏差"
}
except MCPError as e:
return {
"success": False,
"error": e.to_dict()
}
except Exception as e:
return {
"success": False,
"error": {
"code": "INTERNAL_ERROR",
"message": str(e)
}
}
# ==================== 辅助方法 ====================
def _extract_keywords(self, title: str, min_length: int = 2) -> List[str]:
"""
从标题中提取关键词(简单实现)
Args:
title: 标题文本
min_length: 最小关键词长度
Returns:
关键词列表
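        Example (illustrative; assumes `tools` is a constructed AnalyticsTools instance):
            >>> tools._extract_keywords("AI 芯片 的 新突破")
            ['AI', '芯片', '新突破']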
"""
# 移除URL和特殊字符
title = re.sub(r'http[s]?://\S+', '', title)
title = re.sub(r'[^\w\s]', ' ', title)
# 简单分词(按空格和常见分隔符)
words = re.split(r'[\s,。!?、]+', title)
# 过滤停用词和短词
stopwords = {'的', '了', '在', '是', '我', '有', '和', '就', '不', '人', '都', '一', '一个', '上', '也', '很', '到', '说', '要', '去', '你', '会', '着', '没有', '看', '好', '自己', '这'}
keywords = [
word.strip() for word in words
if word.strip() and len(word.strip()) >= min_length and word.strip() not in stopwords
]
return keywords
def _calculate_similarity(self, text1: str, text2: str) -> float:
"""
计算两个文本的相似度
Args:
text1: 文本1
text2: 文本2
Returns:
相似度分数(0-1之间)
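        Example (illustrative):
            >>> tools._calculate_similarity("abcd", "bcde")
            0.75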
"""
# 使用 SequenceMatcher 计算相似度
return SequenceMatcher(None, text1, text2).ratio()
def _find_unique_topics(self, platform_stats: Dict) -> Dict[str, List[str]]:
"""
找出各平台独有的热点话题
Args:
platform_stats: 平台统计数据
Returns:
各平台独有话题字典
"""
unique_topics = {}
# 获取每个平台的TOP关键词
platform_keywords = {}
for platform, stats in platform_stats.items():
top_keywords = set([kw for kw, _ in stats["top_keywords"].most_common(10)])
platform_keywords[platform] = top_keywords
# 找出独有关键词
for platform, keywords in platform_keywords.items():
# 找出其他平台的所有关键词
other_keywords = set()
for other_platform, other_kws in platform_keywords.items():
if other_platform != platform:
other_keywords.update(other_kws)
# 找出独有的
unique = keywords - other_keywords
if unique:
unique_topics[platform] = list(unique)[:5] # 最多5个
return unique_topics
|
2302_81331056/TrendRadar
|
mcp_server/tools/analytics.py
|
Python
|
agpl-3.0
| 74,660
|
"""
配置管理工具
实现配置查询和管理功能。
"""
from typing import Dict, Optional
from ..services.data_service import DataService
from ..utils.validators import validate_config_section
from ..utils.errors import MCPError
class ConfigManagementTools:
"""配置管理工具类"""
def __init__(self, project_root: str = None):
"""
初始化配置管理工具
Args:
project_root: 项目根目录
"""
self.data_service = DataService(project_root)
def get_current_config(self, section: Optional[str] = None) -> Dict:
"""
获取当前系统配置
Args:
section: 配置节 - all/crawler/push/keywords/weights,默认all
Returns:
配置字典
Example:
>>> tools = ConfigManagementTools()
>>> result = tools.get_current_config(section="crawler")
>>> print(result['crawler']['platforms'])
"""
try:
# 参数验证
section = validate_config_section(section)
# 获取配置
config = self.data_service.get_current_config(section=section)
return {
"config": config,
"section": section,
"success": True
}
except MCPError as e:
return {
"success": False,
"error": e.to_dict()
}
except Exception as e:
return {
"success": False,
"error": {
"code": "INTERNAL_ERROR",
"message": str(e)
}
}
|
2302_81331056/TrendRadar
|
mcp_server/tools/config_mgmt.py
|
Python
|
agpl-3.0
| 1,684
|
"""
数据查询工具
实现P0核心的数据查询工具。
"""
from typing import Dict, List, Optional
from ..services.data_service import DataService
from ..utils.validators import (
validate_platforms,
validate_limit,
validate_keyword,
validate_date_range,
validate_top_n,
validate_mode,
validate_date_query
)
from ..utils.errors import MCPError
class DataQueryTools:
"""数据查询工具类"""
def __init__(self, project_root: str = None):
"""
初始化数据查询工具
Args:
project_root: 项目根目录
"""
self.data_service = DataService(project_root)
def get_latest_news(
self,
platforms: Optional[List[str]] = None,
limit: Optional[int] = None,
include_url: bool = False
) -> Dict:
"""
获取最新一批爬取的新闻数据
Args:
platforms: 平台ID列表,如 ['zhihu', 'weibo']
            limit: 返回条数限制,默认50(与 validate_limit 的默认值一致)
include_url: 是否包含URL链接,默认False(节省token)
Returns:
新闻列表字典
Example:
>>> tools = DataQueryTools()
>>> result = tools.get_latest_news(platforms=['zhihu'], limit=10)
>>> print(result['total'])
10
"""
try:
# 参数验证
platforms = validate_platforms(platforms)
limit = validate_limit(limit, default=50)
# 获取数据
news_list = self.data_service.get_latest_news(
platforms=platforms,
limit=limit,
include_url=include_url
)
return {
"news": news_list,
"total": len(news_list),
"platforms": platforms,
"success": True
}
except MCPError as e:
return {
"success": False,
"error": e.to_dict()
}
except Exception as e:
return {
"success": False,
"error": {
"code": "INTERNAL_ERROR",
"message": str(e)
}
}
def search_news_by_keyword(
self,
keyword: str,
date_range: Optional[Dict] = None,
platforms: Optional[List[str]] = None,
limit: Optional[int] = None
) -> Dict:
"""
按关键词搜索历史新闻
Args:
keyword: 搜索关键词(必需)
date_range: 日期范围,格式: {"start": "YYYY-MM-DD", "end": "YYYY-MM-DD"}
platforms: 平台过滤列表
limit: 返回条数限制(可选,默认返回所有)
Returns:
搜索结果字典
Example (假设今天是 2025-11-17):
>>> tools = DataQueryTools()
>>> result = tools.search_news_by_keyword(
... keyword="人工智能",
... date_range={"start": "2025-11-08", "end": "2025-11-17"},
... limit=50
... )
>>> print(result['total'])
"""
try:
# 参数验证
keyword = validate_keyword(keyword)
date_range_tuple = validate_date_range(date_range)
platforms = validate_platforms(platforms)
if limit is not None:
limit = validate_limit(limit, default=100)
# 搜索数据
search_result = self.data_service.search_news_by_keyword(
keyword=keyword,
date_range=date_range_tuple,
platforms=platforms,
limit=limit
)
return {
**search_result,
"success": True
}
except MCPError as e:
return {
"success": False,
"error": e.to_dict()
}
except Exception as e:
return {
"success": False,
"error": {
"code": "INTERNAL_ERROR",
"message": str(e)
}
}
def get_trending_topics(
self,
top_n: Optional[int] = None,
mode: Optional[str] = None
) -> Dict:
"""
获取个人关注词的新闻出现频率统计
注意:本工具基于 config/frequency_words.txt 中的个人关注词列表进行统计,
而不是自动从新闻中提取热点话题。这是一个个人可定制的关注词列表,
用户可以根据自己的兴趣添加或删除关注词。
Args:
top_n: 返回TOP N关注词,默认10
mode: 模式 - daily(当日累计), current(最新一批), incremental(增量)
Returns:
关注词频率统计字典,包含每个关注词在新闻中出现的次数
Example:
>>> tools = DataQueryTools()
>>> result = tools.get_trending_topics(top_n=5, mode="current")
>>> print(len(result['topics']))
5
>>> # 返回的是你在 frequency_words.txt 中设置的关注词的频率统计
"""
try:
# 参数验证
top_n = validate_top_n(top_n, default=10)
valid_modes = ["daily", "current", "incremental"]
mode = validate_mode(mode, valid_modes, default="current")
# 获取趋势话题
trending_result = self.data_service.get_trending_topics(
top_n=top_n,
mode=mode
)
return {
**trending_result,
"success": True
}
except MCPError as e:
return {
"success": False,
"error": e.to_dict()
}
except Exception as e:
return {
"success": False,
"error": {
"code": "INTERNAL_ERROR",
"message": str(e)
}
}
def get_news_by_date(
self,
date_query: Optional[str] = None,
platforms: Optional[List[str]] = None,
limit: Optional[int] = None,
include_url: bool = False
) -> Dict:
"""
按日期查询新闻,支持自然语言日期
Args:
date_query: 日期查询字符串(可选,默认"今天"),支持:
- 相对日期:今天、昨天、前天、3天前、yesterday、3 days ago
- 星期:上周一、本周三、last monday、this friday
- 绝对日期:2025-10-10、10月10日、2025年10月10日
platforms: 平台ID列表,如 ['zhihu', 'weibo']
limit: 返回条数限制,默认50
include_url: 是否包含URL链接,默认False(节省token)
Returns:
新闻列表字典
Example:
>>> tools = DataQueryTools()
>>> # 不指定日期,默认查询今天
>>> result = tools.get_news_by_date(platforms=['zhihu'], limit=20)
>>> # 指定日期
>>> result = tools.get_news_by_date(
... date_query="昨天",
... platforms=['zhihu'],
... limit=20
... )
>>> print(result['total'])
20
"""
try:
# 参数验证 - 默认今天
if date_query is None:
date_query = "今天"
target_date = validate_date_query(date_query)
platforms = validate_platforms(platforms)
limit = validate_limit(limit, default=50)
# 获取数据
news_list = self.data_service.get_news_by_date(
target_date=target_date,
platforms=platforms,
limit=limit,
include_url=include_url
)
return {
"news": news_list,
"total": len(news_list),
"date": target_date.strftime("%Y-%m-%d"),
"date_query": date_query,
"platforms": platforms,
"success": True
}
except MCPError as e:
return {
"success": False,
"error": e.to_dict()
}
except Exception as e:
return {
"success": False,
"error": {
"code": "INTERNAL_ERROR",
"message": str(e)
}
}
|
2302_81331056/TrendRadar
|
mcp_server/tools/data_query.py
|
Python
|
agpl-3.0
| 8,664
|
"""
智能新闻检索工具
提供模糊搜索、链接查询、历史相关新闻检索等高级搜索功能。
"""
import re
from collections import Counter
from datetime import datetime, timedelta
from difflib import SequenceMatcher
from typing import Dict, List, Optional, Tuple
from ..services.data_service import DataService
from ..utils.validators import validate_keyword, validate_limit
from ..utils.errors import MCPError, InvalidParameterError, DataNotFoundError
class SearchTools:
"""智能新闻检索工具类"""
def __init__(self, project_root: str = None):
"""
初始化智能检索工具
Args:
project_root: 项目根目录
"""
self.data_service = DataService(project_root)
# 中文停用词列表
self.stopwords = {
'的', '了', '在', '是', '我', '有', '和', '就', '不', '人', '都', '一',
'一个', '上', '也', '很', '到', '说', '要', '去', '你', '会', '着', '没有',
'看', '好', '自己', '这', '那', '来', '被', '与', '为', '对', '将', '从',
'以', '及', '等', '但', '或', '而', '于', '中', '由', '可', '可以', '已',
'已经', '还', '更', '最', '再', '因为', '所以', '如果', '虽然', '然而'
}
def search_news_unified(
self,
query: str,
search_mode: str = "keyword",
date_range: Optional[Dict[str, str]] = None,
platforms: Optional[List[str]] = None,
limit: int = 50,
sort_by: str = "relevance",
threshold: float = 0.6,
include_url: bool = False
) -> Dict:
"""
统一新闻搜索工具 - 整合多种搜索模式
Args:
query: 查询内容(必需)- 关键词、内容片段或实体名称
search_mode: 搜索模式,可选值:
- "keyword": 精确关键词匹配(默认)
- "fuzzy": 模糊内容匹配(使用相似度算法)
- "entity": 实体名称搜索(自动按权重排序)
date_range: 日期范围(可选)
- **格式**: {"start": "YYYY-MM-DD", "end": "YYYY-MM-DD"}
- **示例**: {"start": "2025-01-01", "end": "2025-01-07"}
- **默认**: 不指定时默认查询今天
- **注意**: start和end可以相同(表示单日查询)
platforms: 平台过滤列表,如 ['zhihu', 'weibo']
limit: 返回条数限制,默认50
sort_by: 排序方式,可选值:
- "relevance": 按相关度排序(默认)
- "weight": 按新闻权重排序
- "date": 按日期排序
threshold: 相似度阈值(仅fuzzy模式有效),0-1之间,默认0.6
include_url: 是否包含URL链接,默认False(节省token)
Returns:
搜索结果字典,包含匹配的新闻列表
Examples:
- search_news_unified(query="人工智能", search_mode="keyword")
- search_news_unified(query="特斯拉降价", search_mode="fuzzy", threshold=0.4)
- search_news_unified(query="马斯克", search_mode="entity", limit=20)
- search_news_unified(query="iPhone 16", date_range={"start": "2025-01-01", "end": "2025-01-07"})
"""
try:
# 参数验证
query = validate_keyword(query)
if search_mode not in ["keyword", "fuzzy", "entity"]:
raise InvalidParameterError(
f"无效的搜索模式: {search_mode}",
suggestion="支持的模式: keyword, fuzzy, entity"
)
if sort_by not in ["relevance", "weight", "date"]:
raise InvalidParameterError(
f"无效的排序方式: {sort_by}",
suggestion="支持的排序: relevance, weight, date"
)
limit = validate_limit(limit, default=50)
threshold = max(0.0, min(1.0, threshold))
# 处理日期范围
if date_range:
from ..utils.validators import validate_date_range
date_range_tuple = validate_date_range(date_range)
start_date, end_date = date_range_tuple
else:
# 不指定日期时,使用最新可用数据日期(而非 datetime.now())
earliest, latest = self.data_service.get_available_date_range()
if latest is None:
# 没有任何可用数据
return {
"success": False,
"error": {
"code": "NO_DATA_AVAILABLE",
"message": "output 目录下没有可用的新闻数据",
"suggestion": "请先运行爬虫生成数据,或检查 output 目录"
}
}
# 使用最新可用日期
start_date = end_date = latest
# 收集所有匹配的新闻
all_matches = []
current_date = start_date
while current_date <= end_date:
try:
all_titles, id_to_name, timestamps = self.data_service.parser.read_all_titles_for_date(
date=current_date,
platform_ids=platforms
)
# 根据搜索模式执行不同的搜索逻辑
if search_mode == "keyword":
matches = self._search_by_keyword_mode(
query, all_titles, id_to_name, current_date, include_url
)
elif search_mode == "fuzzy":
matches = self._search_by_fuzzy_mode(
query, all_titles, id_to_name, current_date, threshold, include_url
)
else: # entity
matches = self._search_by_entity_mode(
query, all_titles, id_to_name, current_date, include_url
)
all_matches.extend(matches)
except DataNotFoundError:
# 该日期没有数据,继续下一天
pass
current_date += timedelta(days=1)
if not all_matches:
# 获取可用日期范围用于错误提示
earliest, latest = self.data_service.get_available_date_range()
# 判断时间范围描述
if start_date.date() == datetime.now().date() and start_date == end_date:
time_desc = "今天"
elif start_date == end_date:
time_desc = start_date.strftime("%Y-%m-%d")
else:
time_desc = f"{start_date.strftime('%Y-%m-%d')} 至 {end_date.strftime('%Y-%m-%d')}"
# 构建错误消息
if earliest and latest:
available_desc = f"{earliest.strftime('%Y-%m-%d')} 至 {latest.strftime('%Y-%m-%d')}"
message = f"未找到匹配的新闻(查询范围: {time_desc},可用数据: {available_desc})"
else:
message = f"未找到匹配的新闻({time_desc})"
result = {
"success": True,
"results": [],
"total": 0,
"query": query,
"search_mode": search_mode,
"time_range": time_desc,
"message": message
}
return result
# 统一排序逻辑
if sort_by == "relevance":
all_matches.sort(key=lambda x: x.get("similarity_score", 1.0), reverse=True)
elif sort_by == "weight":
from .analytics import calculate_news_weight
all_matches.sort(key=lambda x: calculate_news_weight(x), reverse=True)
elif sort_by == "date":
all_matches.sort(key=lambda x: x.get("date", ""), reverse=True)
# 限制返回数量
results = all_matches[:limit]
# 构建时间范围描述(正确判断是否为今天)
if start_date.date() == datetime.now().date() and start_date == end_date:
time_range_desc = "今天"
elif start_date == end_date:
time_range_desc = start_date.strftime("%Y-%m-%d")
else:
time_range_desc = f"{start_date.strftime('%Y-%m-%d')} 至 {end_date.strftime('%Y-%m-%d')}"
result = {
"success": True,
"summary": {
"total_found": len(all_matches),
"returned_count": len(results),
"requested_limit": limit,
"search_mode": search_mode,
"query": query,
"platforms": platforms or "所有平台",
"time_range": time_range_desc,
"sort_by": sort_by
},
"results": results
}
if search_mode == "fuzzy":
result["summary"]["threshold"] = threshold
if len(all_matches) < limit:
result["note"] = f"模糊搜索模式下,相似度阈值 {threshold} 仅匹配到 {len(all_matches)} 条结果"
return result
except MCPError as e:
return {
"success": False,
"error": e.to_dict()
}
except Exception as e:
return {
"success": False,
"error": {
"code": "INTERNAL_ERROR",
"message": str(e)
}
}
def _search_by_keyword_mode(
self,
query: str,
all_titles: Dict,
id_to_name: Dict,
current_date: datetime,
include_url: bool
) -> List[Dict]:
"""
关键词搜索模式(精确匹配)
Args:
query: 搜索关键词
all_titles: 所有标题字典
id_to_name: 平台ID到名称映射
current_date: 当前日期
Returns:
匹配的新闻列表
"""
matches = []
query_lower = query.lower()
for platform_id, titles in all_titles.items():
platform_name = id_to_name.get(platform_id, platform_id)
for title, info in titles.items():
# 精确包含判断
if query_lower in title.lower():
news_item = {
"title": title,
"platform": platform_id,
"platform_name": platform_name,
"date": current_date.strftime("%Y-%m-%d"),
"similarity_score": 1.0, # 精确匹配,相似度为1
"ranks": info.get("ranks", []),
"count": len(info.get("ranks", [])),
"rank": info["ranks"][0] if info["ranks"] else 999
}
# 条件性添加 URL 字段
if include_url:
news_item["url"] = info.get("url", "")
news_item["mobileUrl"] = info.get("mobileUrl", "")
matches.append(news_item)
return matches
def _search_by_fuzzy_mode(
self,
query: str,
all_titles: Dict,
id_to_name: Dict,
current_date: datetime,
threshold: float,
include_url: bool
) -> List[Dict]:
"""
模糊搜索模式(使用相似度算法)
Args:
query: 搜索内容
all_titles: 所有标题字典
id_to_name: 平台ID到名称映射
current_date: 当前日期
threshold: 相似度阈值
Returns:
匹配的新闻列表
"""
matches = []
for platform_id, titles in all_titles.items():
platform_name = id_to_name.get(platform_id, platform_id)
for title, info in titles.items():
# 模糊匹配
is_match, similarity = self._fuzzy_match(query, title, threshold)
if is_match:
news_item = {
"title": title,
"platform": platform_id,
"platform_name": platform_name,
"date": current_date.strftime("%Y-%m-%d"),
"similarity_score": round(similarity, 4),
"ranks": info.get("ranks", []),
"count": len(info.get("ranks", [])),
"rank": info["ranks"][0] if info["ranks"] else 999
}
# 条件性添加 URL 字段
if include_url:
news_item["url"] = info.get("url", "")
news_item["mobileUrl"] = info.get("mobileUrl", "")
matches.append(news_item)
return matches
def _search_by_entity_mode(
self,
query: str,
all_titles: Dict,
id_to_name: Dict,
current_date: datetime,
include_url: bool
) -> List[Dict]:
"""
实体搜索模式(自动按权重排序)
Args:
query: 实体名称
all_titles: 所有标题字典
id_to_name: 平台ID到名称映射
current_date: 当前日期
Returns:
匹配的新闻列表
"""
matches = []
for platform_id, titles in all_titles.items():
platform_name = id_to_name.get(platform_id, platform_id)
for title, info in titles.items():
# 实体搜索:精确包含实体名称
if query in title:
news_item = {
"title": title,
"platform": platform_id,
"platform_name": platform_name,
"date": current_date.strftime("%Y-%m-%d"),
"similarity_score": 1.0,
"ranks": info.get("ranks", []),
"count": len(info.get("ranks", [])),
"rank": info["ranks"][0] if info["ranks"] else 999
}
# 条件性添加 URL 字段
if include_url:
news_item["url"] = info.get("url", "")
news_item["mobileUrl"] = info.get("mobileUrl", "")
matches.append(news_item)
return matches
def _calculate_similarity(self, text1: str, text2: str) -> float:
"""
计算两个文本的相似度
Args:
text1: 文本1
text2: 文本2
Returns:
相似度分数 (0-1之间)
"""
# 使用 difflib.SequenceMatcher 计算序列相似度
return SequenceMatcher(None, text1.lower(), text2.lower()).ratio()
def _fuzzy_match(self, query: str, text: str, threshold: float = 0.3) -> Tuple[bool, float]:
"""
模糊匹配函数
Args:
query: 查询文本
text: 待匹配文本
threshold: 匹配阈值
Returns:
(是否匹配, 相似度分数)
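        Example (illustrative; assumes `tools` is a constructed SearchTools instance):
            >>> tools._fuzzy_match("tesla", "Tesla cuts prices")
            (True, 1.0)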
"""
# 直接包含判断
if query.lower() in text.lower():
return True, 1.0
# 计算整体相似度
similarity = self._calculate_similarity(query, text)
if similarity >= threshold:
return True, similarity
# 分词后的部分匹配
query_words = set(self._extract_keywords(query))
text_words = set(self._extract_keywords(text))
if not query_words or not text_words:
return False, 0.0
# 计算关键词重合度
common_words = query_words & text_words
keyword_overlap = len(common_words) / len(query_words)
if keyword_overlap >= 0.5: # 50%的关键词重合
return True, keyword_overlap
return False, similarity
def _extract_keywords(self, text: str, min_length: int = 2) -> List[str]:
"""
从文本中提取关键词
Args:
text: 输入文本
min_length: 最小词长
Returns:
关键词列表
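        Example (illustrative; `tools` is a constructed SearchTools instance):
            >>> tools._extract_keywords("[热榜] AI 芯片 的 突破 http://t.cn/x")
            ['AI', '芯片', '突破']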
"""
# 移除URL和特殊字符
text = re.sub(r'http[s]?://\S+', '', text)
text = re.sub(r'\[.*?\]', '', text) # 移除方括号内容
# 使用正则表达式分词(中文和英文)
words = re.findall(r'[\w]+', text)
# 过滤停用词和短词
keywords = [
word for word in words
if word and len(word) >= min_length and word not in self.stopwords
]
return keywords
def _calculate_keyword_overlap(self, keywords1: List[str], keywords2: List[str]) -> float:
"""
计算两个关键词列表的重合度
Args:
keywords1: 关键词列表1
keywords2: 关键词列表2
Returns:
重合度分数 (0-1之间)
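        Example (illustrative, Jaccard overlap 2/4):
            >>> tools._calculate_keyword_overlap(['a', 'b', 'c'], ['b', 'c', 'd'])
            0.5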
"""
if not keywords1 or not keywords2:
return 0.0
set1 = set(keywords1)
set2 = set(keywords2)
# Jaccard 相似度
intersection = len(set1 & set2)
union = len(set1 | set2)
if union == 0:
return 0.0
return intersection / union
def search_related_news_history(
self,
reference_text: str,
time_preset: str = "yesterday",
start_date: Optional[datetime] = None,
end_date: Optional[datetime] = None,
threshold: float = 0.4,
limit: int = 50,
include_url: bool = False
) -> Dict:
"""
在历史数据中搜索与给定新闻相关的新闻
Args:
reference_text: 参考新闻标题或内容
time_preset: 时间范围预设值,可选:
- "yesterday": 昨天
- "last_week": 上周 (7天)
- "last_month": 上个月 (30天)
- "custom": 自定义日期范围(需要提供 start_date 和 end_date)
start_date: 自定义开始日期(仅当 time_preset="custom" 时有效)
end_date: 自定义结束日期(仅当 time_preset="custom" 时有效)
threshold: 相似度阈值 (0-1之间),默认0.4
limit: 返回条数限制,默认50
include_url: 是否包含URL链接,默认False(节省token)
Returns:
搜索结果字典,包含相关新闻列表
Example:
>>> tools = SearchTools()
>>> result = tools.search_related_news_history(
... reference_text="人工智能技术突破",
... time_preset="last_week",
... threshold=0.4,
... limit=50
... )
>>> for news in result['results']:
... print(f"{news['date']}: {news['title']} (相似度: {news['similarity_score']})")
"""
try:
# 参数验证
reference_text = validate_keyword(reference_text)
threshold = max(0.0, min(1.0, threshold))
limit = validate_limit(limit, default=50)
# 确定查询日期范围
today = datetime.now()
if time_preset == "yesterday":
search_start = today - timedelta(days=1)
search_end = today - timedelta(days=1)
elif time_preset == "last_week":
search_start = today - timedelta(days=7)
search_end = today - timedelta(days=1)
elif time_preset == "last_month":
search_start = today - timedelta(days=30)
search_end = today - timedelta(days=1)
elif time_preset == "custom":
if not start_date or not end_date:
raise InvalidParameterError(
"自定义时间范围需要提供 start_date 和 end_date",
suggestion="请提供 start_date 和 end_date 参数"
)
search_start = start_date
search_end = end_date
else:
raise InvalidParameterError(
f"不支持的时间范围: {time_preset}",
suggestion="请使用 'yesterday', 'last_week', 'last_month' 或 'custom'"
)
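            # Illustrative: with time_preset="last_week" on a hypothetical today of
            # 2025-10-11, the scan covers 2025-10-04 through 2025-10-10 inclusive.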
# 提取参考文本的关键词
reference_keywords = self._extract_keywords(reference_text)
if not reference_keywords:
raise InvalidParameterError(
"无法从参考文本中提取关键词",
suggestion="请提供更详细的文本内容"
)
# 收集所有相关新闻
all_related_news = []
current_date = search_start
while current_date <= search_end:
try:
# 读取该日期的数据
all_titles, id_to_name, _ = self.data_service.parser.read_all_titles_for_date(current_date)
# 搜索相关新闻
for platform_id, titles in all_titles.items():
platform_name = id_to_name.get(platform_id, platform_id)
for title, info in titles.items():
# 计算标题相似度
title_similarity = self._calculate_similarity(reference_text, title)
# 提取标题关键词
title_keywords = self._extract_keywords(title)
# 计算关键词重合度
keyword_overlap = self._calculate_keyword_overlap(
reference_keywords,
title_keywords
)
# 综合相似度 (70% 关键词重合 + 30% 文本相似度)
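                            # e.g. (hypothetical values) overlap 0.6, text similarity 0.2
                            # -> 0.6*0.7 + 0.2*0.3 = 0.48 combined score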
combined_score = keyword_overlap * 0.7 + title_similarity * 0.3
if combined_score >= threshold:
news_item = {
"title": title,
"platform": platform_id,
"platform_name": platform_name,
"date": current_date.strftime("%Y-%m-%d"),
"similarity_score": round(combined_score, 4),
"keyword_overlap": round(keyword_overlap, 4),
"text_similarity": round(title_similarity, 4),
"common_keywords": list(set(reference_keywords) & set(title_keywords)),
"rank": info["ranks"][0] if info["ranks"] else 0
}
# 条件性添加 URL 字段
if include_url:
news_item["url"] = info.get("url", "")
news_item["mobileUrl"] = info.get("mobileUrl", "")
all_related_news.append(news_item)
except DataNotFoundError:
# 该日期没有数据,继续下一天
pass
except Exception as e:
# 记录错误但继续处理其他日期
print(f"Warning: 处理日期 {current_date.strftime('%Y-%m-%d')} 时出错: {e}")
# 移动到下一天
current_date += timedelta(days=1)
if not all_related_news:
return {
"success": True,
"results": [],
"total": 0,
"query": reference_text,
"time_preset": time_preset,
"date_range": {
"start": search_start.strftime("%Y-%m-%d"),
"end": search_end.strftime("%Y-%m-%d")
},
"message": "未找到相关新闻"
}
# 按相似度排序
all_related_news.sort(key=lambda x: x["similarity_score"], reverse=True)
# 限制返回数量
results = all_related_news[:limit]
# 统计信息
platform_distribution = Counter([news["platform"] for news in all_related_news])
date_distribution = Counter([news["date"] for news in all_related_news])
result = {
"success": True,
"summary": {
"total_found": len(all_related_news),
"returned_count": len(results),
"requested_limit": limit,
"threshold": threshold,
"reference_text": reference_text,
"reference_keywords": reference_keywords,
"time_preset": time_preset,
"date_range": {
"start": search_start.strftime("%Y-%m-%d"),
"end": search_end.strftime("%Y-%m-%d")
}
},
"results": results,
"statistics": {
"platform_distribution": dict(platform_distribution),
"date_distribution": dict(date_distribution),
"avg_similarity": round(
sum([news["similarity_score"] for news in all_related_news]) / len(all_related_news),
4
) if all_related_news else 0.0
}
}
if len(all_related_news) < limit:
result["note"] = f"相关性阈值 {threshold} 下仅找到 {len(all_related_news)} 条相关新闻"
return result
except MCPError as e:
return {
"success": False,
"error": e.to_dict()
}
except Exception as e:
return {
"success": False,
"error": {
"code": "INTERNAL_ERROR",
"message": str(e)
}
}
|
2302_81331056/TrendRadar
|
mcp_server/tools/search_tools.py
|
Python
|
agpl-3.0
| 26,845
|
"""
系统管理工具
实现系统状态查询和爬虫触发功能。
"""
from pathlib import Path
from typing import Dict, List, Optional
from ..services.data_service import DataService
from ..utils.validators import validate_platforms
from ..utils.errors import MCPError, CrawlTaskError
class SystemManagementTools:
"""系统管理工具类"""
def __init__(self, project_root: str = None):
"""
初始化系统管理工具
Args:
project_root: 项目根目录
"""
self.data_service = DataService(project_root)
if project_root:
self.project_root = Path(project_root)
else:
# 获取项目根目录
current_file = Path(__file__)
self.project_root = current_file.parent.parent.parent
def get_system_status(self) -> Dict:
"""
获取系统运行状态和健康检查信息
Returns:
系统状态字典
Example:
>>> tools = SystemManagementTools()
>>> result = tools.get_system_status()
>>> print(result['system']['version'])
"""
try:
# 获取系统状态
status = self.data_service.get_system_status()
return {
**status,
"success": True
}
except MCPError as e:
return {
"success": False,
"error": e.to_dict()
}
except Exception as e:
return {
"success": False,
"error": {
"code": "INTERNAL_ERROR",
"message": str(e)
}
}
def trigger_crawl(self, platforms: Optional[List[str]] = None, save_to_local: bool = False, include_url: bool = False) -> Dict:
"""
手动触发一次临时爬取任务(可选持久化)
Args:
platforms: 指定平台列表,为空则爬取所有平台
save_to_local: 是否保存到本地 output 目录,默认 False
include_url: 是否包含URL链接,默认False(节省token)
Returns:
爬取结果字典,包含新闻数据和保存路径(如果保存)
Example:
>>> tools = SystemManagementTools()
>>> # 临时爬取,不保存
>>> result = tools.trigger_crawl(platforms=['zhihu', 'weibo'])
>>> print(result['data'])
>>> # 爬取并保存到本地
>>> result = tools.trigger_crawl(platforms=['zhihu'], save_to_local=True)
>>> print(result['saved_files'])
"""
try:
import json
import time
import random
import requests
from datetime import datetime
import pytz
import yaml
# 参数验证
platforms = validate_platforms(platforms)
# 加载配置文件
config_path = self.project_root / "config" / "config.yaml"
if not config_path.exists():
raise CrawlTaskError(
"配置文件不存在",
suggestion=f"请确保配置文件存在: {config_path}"
)
# 读取配置
with open(config_path, "r", encoding="utf-8") as f:
config_data = yaml.safe_load(f)
# 获取平台配置
all_platforms = config_data.get("platforms", [])
if not all_platforms:
raise CrawlTaskError(
"配置文件中没有平台配置",
suggestion="请检查 config/config.yaml 中的 platforms 配置"
)
# 过滤平台
if platforms:
target_platforms = [p for p in all_platforms if p["id"] in platforms]
if not target_platforms:
raise CrawlTaskError(
f"指定的平台不存在: {platforms}",
suggestion=f"可用平台: {[p['id'] for p in all_platforms]}"
)
else:
target_platforms = all_platforms
# 获取请求间隔
request_interval = config_data.get("crawler", {}).get("request_interval", 100)
# 构建平台ID列表
ids = []
for platform in target_platforms:
if "name" in platform:
ids.append((platform["id"], platform["name"]))
else:
ids.append(platform["id"])
print(f"开始临时爬取,平台: {[p.get('name', p['id']) for p in target_platforms]}")
# 爬取数据
results = {}
id_to_name = {}
failed_ids = []
for i, id_info in enumerate(ids):
if isinstance(id_info, tuple):
id_value, name = id_info
else:
id_value = id_info
name = id_value
id_to_name[id_value] = name
# 构建请求URL
url = f"https://newsnow.busiyi.world/api/s?id={id_value}&latest"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36",
"Accept": "application/json, text/plain, */*",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Connection": "keep-alive",
"Cache-Control": "no-cache",
}
# 重试机制
max_retries = 2
retries = 0
success = False
while retries <= max_retries and not success:
try:
response = requests.get(url, headers=headers, timeout=10)
response.raise_for_status()
data_text = response.text
data_json = json.loads(data_text)
status = data_json.get("status", "未知")
if status not in ["success", "cache"]:
raise ValueError(f"响应状态异常: {status}")
status_info = "最新数据" if status == "success" else "缓存数据"
print(f"获取 {id_value} 成功({status_info})")
# 解析数据
results[id_value] = {}
for index, item in enumerate(data_json.get("items", []), 1):
title = item["title"]
url_link = item.get("url", "")
mobile_url = item.get("mobileUrl", "")
if title in results[id_value]:
results[id_value][title]["ranks"].append(index)
else:
results[id_value][title] = {
"ranks": [index],
"url": url_link,
"mobileUrl": mobile_url,
}
success = True
except Exception as e:
retries += 1
if retries <= max_retries:
wait_time = random.uniform(3, 5)
print(f"请求 {id_value} 失败: {e}. {wait_time:.2f}秒后重试...")
time.sleep(wait_time)
else:
print(f"请求 {id_value} 失败: {e}")
failed_ids.append(id_value)
# 请求间隔
if i < len(ids) - 1:
actual_interval = request_interval + random.randint(-10, 20)
actual_interval = max(50, actual_interval)
time.sleep(actual_interval / 1000)
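                # e.g. request_interval=100 yields a jittered pause of 90-120 ms
                # between platform requests (floored at 50 ms above).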
# 格式化返回数据
news_data = []
for platform_id, titles_data in results.items():
platform_name = id_to_name.get(platform_id, platform_id)
for title, info in titles_data.items():
news_item = {
"platform_id": platform_id,
"platform_name": platform_name,
"title": title,
"ranks": info["ranks"]
}
# 条件性添加 URL 字段
if include_url:
news_item["url"] = info.get("url", "")
news_item["mobile_url"] = info.get("mobileUrl", "")
news_data.append(news_item)
# 获取北京时间
beijing_tz = pytz.timezone("Asia/Shanghai")
now = datetime.now(beijing_tz)
# 构建返回结果
result = {
"success": True,
"task_id": f"crawl_{int(time.time())}",
"status": "completed",
"crawl_time": now.strftime("%Y-%m-%d %H:%M:%S"),
"platforms": list(results.keys()),
"total_news": len(news_data),
"failed_platforms": failed_ids,
"data": news_data,
"saved_to_local": save_to_local
}
# 如果需要持久化,调用保存逻辑
if save_to_local:
try:
import re
# 辅助函数:清理标题
def clean_title(title: str) -> str:
"""清理标题中的特殊字符"""
if not isinstance(title, str):
title = str(title)
cleaned_title = title.replace("\n", " ").replace("\r", " ")
cleaned_title = re.sub(r"\s+", " ", cleaned_title)
cleaned_title = cleaned_title.strip()
return cleaned_title
# 辅助函数:创建目录
def ensure_directory_exists(directory: str):
"""确保目录存在"""
Path(directory).mkdir(parents=True, exist_ok=True)
# 格式化日期和时间
date_folder = now.strftime("%Y年%m月%d日")
time_filename = now.strftime("%H时%M分")
# 创建 txt 文件路径
txt_dir = self.project_root / "output" / date_folder / "txt"
ensure_directory_exists(str(txt_dir))
txt_file_path = txt_dir / f"{time_filename}.txt"
# 创建 html 文件路径
html_dir = self.project_root / "output" / date_folder / "html"
ensure_directory_exists(str(html_dir))
html_file_path = html_dir / f"{time_filename}.html"
# 保存 txt 文件(按照 main.py 的格式)
with open(txt_file_path, "w", encoding="utf-8") as f:
for id_value, title_data in results.items():
# id | name 或 id
name = id_to_name.get(id_value)
if name and name != id_value:
f.write(f"{id_value} | {name}\n")
else:
f.write(f"{id_value}\n")
# 按排名排序标题
sorted_titles = []
for title, info in title_data.items():
cleaned = clean_title(title)
if isinstance(info, dict):
ranks = info.get("ranks", [])
url = info.get("url", "")
mobile_url = info.get("mobileUrl", "")
else:
ranks = info if isinstance(info, list) else []
url = ""
mobile_url = ""
rank = ranks[0] if ranks else 1
sorted_titles.append((rank, cleaned, url, mobile_url))
sorted_titles.sort(key=lambda x: x[0])
for rank, cleaned, url, mobile_url in sorted_titles:
line = f"{rank}. {cleaned}"
if url:
line += f" [URL:{url}]"
if mobile_url:
line += f" [MOBILE:{mobile_url}]"
f.write(line + "\n")
f.write("\n")
if failed_ids:
f.write("==== 以下ID请求失败 ====\n")
for id_value in failed_ids:
f.write(f"{id_value}\n")
# 保存 html 文件(简化版)
html_content = self._generate_simple_html(results, id_to_name, failed_ids, now)
with open(html_file_path, "w", encoding="utf-8") as f:
f.write(html_content)
print(f"数据已保存到:")
print(f" TXT: {txt_file_path}")
print(f" HTML: {html_file_path}")
result["saved_files"] = {
"txt": str(txt_file_path),
"html": str(html_file_path)
}
result["note"] = "数据已持久化到 output 文件夹"
except Exception as e:
print(f"保存文件失败: {e}")
result["save_error"] = str(e)
result["note"] = "爬取成功但保存失败,数据仅在内存中"
else:
result["note"] = "临时爬取结果,未持久化到output文件夹"
return result
except MCPError as e:
return {
"success": False,
"error": e.to_dict()
}
except Exception as e:
import traceback
return {
"success": False,
"error": {
"code": "INTERNAL_ERROR",
"message": str(e),
"traceback": traceback.format_exc()
}
}
def _generate_simple_html(self, results: Dict, id_to_name: Dict, failed_ids: List, now) -> str:
"""生成简化的 HTML 报告"""
html = """<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>MCP 爬取结果</title>
<style>
body { font-family: Arial, sans-serif; margin: 20px; background: #f5f5f5; }
.container { max-width: 900px; margin: 0 auto; background: white; padding: 20px; border-radius: 8px; }
h1 { color: #333; border-bottom: 2px solid #4CAF50; padding-bottom: 10px; }
.platform { margin-bottom: 30px; }
.platform-name { background: #4CAF50; color: white; padding: 10px; border-radius: 5px; margin-bottom: 10px; }
.news-item { padding: 8px; border-bottom: 1px solid #eee; }
.rank { color: #666; font-weight: bold; margin-right: 10px; }
.title { color: #333; }
.link { color: #1976D2; text-decoration: none; margin-left: 10px; font-size: 0.9em; }
.link:hover { text-decoration: underline; }
.failed { background: #ffebee; padding: 10px; border-radius: 5px; margin-top: 20px; }
.failed h3 { color: #c62828; margin-top: 0; }
.timestamp { color: #666; font-size: 0.9em; text-align: right; margin-top: 20px; }
</style>
</head>
<body>
<div class="container">
<h1>MCP 爬取结果</h1>
"""
# 添加时间戳
html += f' <p class="timestamp">爬取时间: {now.strftime("%Y-%m-%d %H:%M:%S")}</p>\n\n'
# 遍历每个平台
for platform_id, titles_data in results.items():
platform_name = id_to_name.get(platform_id, platform_id)
html += f' <div class="platform">\n'
html += f' <div class="platform-name">{platform_name}</div>\n'
# 排序标题
sorted_items = []
for title, info in titles_data.items():
ranks = info.get("ranks", [])
url = info.get("url", "")
mobile_url = info.get("mobileUrl", "")
rank = ranks[0] if ranks else 999
sorted_items.append((rank, title, url, mobile_url))
sorted_items.sort(key=lambda x: x[0])
# 显示新闻
for rank, title, url, mobile_url in sorted_items:
html += f' <div class="news-item">\n'
html += f' <span class="rank">{rank}.</span>\n'
html += f' <span class="title">{self._html_escape(title)}</span>\n'
if url:
html += f' <a class="link" href="{self._html_escape(url)}" target="_blank">链接</a>\n'
if mobile_url and mobile_url != url:
html += f' <a class="link" href="{self._html_escape(mobile_url)}" target="_blank">移动版</a>\n'
html += ' </div>\n'
html += ' </div>\n\n'
# 失败的平台
if failed_ids:
html += ' <div class="failed">\n'
html += ' <h3>请求失败的平台</h3>\n'
html += ' <ul>\n'
for platform_id in failed_ids:
html += f' <li>{self._html_escape(platform_id)}</li>\n'
html += ' </ul>\n'
html += ' </div>\n'
html += """ </div>
</body>
</html>"""
return html
def _html_escape(self, text: str) -> str:
"""HTML 转义"""
if not isinstance(text, str):
text = str(text)
        return (
            text.replace("&", "&amp;")
            .replace("<", "&lt;")
            .replace(">", "&gt;")
            .replace('"', "&quot;")
            .replace("'", "&#x27;")
        )
|
2302_81331056/TrendRadar
|
mcp_server/tools/system.py
|
Python
|
agpl-3.0
| 18,584
|
"""
工具类模块
提供参数验证、错误处理等辅助功能。
"""
|
2302_81331056/TrendRadar
|
mcp_server/utils/__init__.py
|
Python
|
agpl-3.0
| 77
|
"""
日期解析工具
支持多种自然语言日期格式解析,包括相对日期和绝对日期。
"""
import re
from datetime import datetime, timedelta
from .errors import InvalidParameterError
class DateParser:
"""日期解析器类"""
# 中文日期映射
CN_DATE_MAPPING = {
"今天": 0,
"昨天": 1,
"前天": 2,
"大前天": 3,
}
# 英文日期映射
EN_DATE_MAPPING = {
"today": 0,
"yesterday": 1,
}
# 星期映射
WEEKDAY_CN = {
"一": 0, "二": 1, "三": 2, "四": 3,
"五": 4, "六": 5, "日": 6, "天": 6
}
WEEKDAY_EN = {
"monday": 0, "tuesday": 1, "wednesday": 2, "thursday": 3,
"friday": 4, "saturday": 5, "sunday": 6
}
@staticmethod
def parse_date_query(date_query: str) -> datetime:
"""
解析日期查询字符串
支持的格式:
- 相对日期(中文):今天、昨天、前天、大前天、N天前
- 相对日期(英文):today、yesterday、N days ago
- 星期(中文):上周一、上周二、本周三
- 星期(英文):last monday、this friday
- 绝对日期:2025-10-10、10月10日、2025年10月10日
Args:
date_query: 日期查询字符串
Returns:
datetime对象
Raises:
InvalidParameterError: 日期格式无法识别
        Examples (假设今天是 2025-10-11):
>>> DateParser.parse_date_query("今天")
datetime(2025, 10, 11)
>>> DateParser.parse_date_query("昨天")
datetime(2025, 10, 10)
>>> DateParser.parse_date_query("3天前")
datetime(2025, 10, 8)
>>> DateParser.parse_date_query("2025-10-10")
datetime(2025, 10, 10)
"""
if not date_query or not isinstance(date_query, str):
raise InvalidParameterError(
"日期查询字符串不能为空",
suggestion="请提供有效的日期查询,如:今天、昨天、2025-10-10"
)
date_query = date_query.strip().lower()
# 1. 尝试解析中文常用相对日期
if date_query in DateParser.CN_DATE_MAPPING:
days_ago = DateParser.CN_DATE_MAPPING[date_query]
return datetime.now() - timedelta(days=days_ago)
# 2. 尝试解析英文常用相对日期
if date_query in DateParser.EN_DATE_MAPPING:
days_ago = DateParser.EN_DATE_MAPPING[date_query]
return datetime.now() - timedelta(days=days_ago)
# 3. 尝试解析 "N天前" 或 "N days ago"
cn_days_ago_match = re.match(r'(\d+)\s*天前', date_query)
if cn_days_ago_match:
days = int(cn_days_ago_match.group(1))
if days > 365:
raise InvalidParameterError(
f"天数过大: {days}天",
suggestion="请使用小于365天的相对日期或使用绝对日期"
)
return datetime.now() - timedelta(days=days)
en_days_ago_match = re.match(r'(\d+)\s*days?\s+ago', date_query)
if en_days_ago_match:
days = int(en_days_ago_match.group(1))
if days > 365:
raise InvalidParameterError(
f"天数过大: {days}天",
suggestion="请使用小于365天的相对日期或使用绝对日期"
)
return datetime.now() - timedelta(days=days)
# 4. 尝试解析星期(中文):上周一、本周三
cn_weekday_match = re.match(r'(上|本)周([一二三四五六日天])', date_query)
if cn_weekday_match:
week_type = cn_weekday_match.group(1) # 上 或 本
weekday_str = cn_weekday_match.group(2)
target_weekday = DateParser.WEEKDAY_CN[weekday_str]
return DateParser._get_date_by_weekday(target_weekday, week_type == "上")
# 5. 尝试解析星期(英文):last monday、this friday
en_weekday_match = re.match(r'(last|this)\s+(monday|tuesday|wednesday|thursday|friday|saturday|sunday)', date_query)
if en_weekday_match:
week_type = en_weekday_match.group(1) # last 或 this
weekday_str = en_weekday_match.group(2)
target_weekday = DateParser.WEEKDAY_EN[weekday_str]
return DateParser._get_date_by_weekday(target_weekday, week_type == "last")
# 6. 尝试解析绝对日期:YYYY-MM-DD
iso_date_match = re.match(r'(\d{4})-(\d{1,2})-(\d{1,2})', date_query)
if iso_date_match:
year = int(iso_date_match.group(1))
month = int(iso_date_match.group(2))
day = int(iso_date_match.group(3))
try:
return datetime(year, month, day)
except ValueError as e:
raise InvalidParameterError(
f"无效的日期: {date_query}",
suggestion=f"日期值错误: {str(e)}"
)
# 7. 尝试解析中文日期:MM月DD日 或 YYYY年MM月DD日
cn_date_match = re.match(r'(?:(\d{4})年)?(\d{1,2})月(\d{1,2})日', date_query)
if cn_date_match:
year_str = cn_date_match.group(1)
month = int(cn_date_match.group(2))
day = int(cn_date_match.group(3))
# 如果没有年份,使用当前年份
            if year_str:
                year = int(year_str)
            else:
                year = datetime.now().year
                # 仅在未显式给出年份时推断:月份大于当前月份则视为去年
                if month > datetime.now().month:
                    year -= 1
try:
return datetime(year, month, day)
except ValueError as e:
raise InvalidParameterError(
f"无效的日期: {date_query}",
suggestion=f"日期值错误: {str(e)}"
)
# 8. 尝试解析斜杠格式:YYYY/MM/DD 或 MM/DD
slash_date_match = re.match(r'(?:(\d{4})/)?(\d{1,2})/(\d{1,2})', date_query)
if slash_date_match:
year_str = slash_date_match.group(1)
month = int(slash_date_match.group(2))
day = int(slash_date_match.group(3))
            if year_str:
                year = int(year_str)
            else:
                year = datetime.now().year
                # 仅在未显式给出年份时推断:月份大于当前月份则视为去年
                if month > datetime.now().month:
                    year -= 1
try:
return datetime(year, month, day)
except ValueError as e:
raise InvalidParameterError(
f"无效的日期: {date_query}",
suggestion=f"日期值错误: {str(e)}"
)
# 如果所有格式都不匹配
raise InvalidParameterError(
f"无法识别的日期格式: {date_query}",
suggestion=(
"支持的格式:\n"
"- 相对日期: 今天、昨天、前天、3天前、today、yesterday、3 days ago\n"
"- 星期: 上周一、本周三、last monday、this friday\n"
"- 绝对日期: 2025-10-10、10月10日、2025年10月10日"
)
)
@staticmethod
def _get_date_by_weekday(target_weekday: int, is_last_week: bool) -> datetime:
"""
根据星期几获取日期
Args:
target_weekday: 目标星期 (0=周一, 6=周日)
is_last_week: 是否是上周
Returns:
datetime对象
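        Example (worked; assume today is Wednesday, weekday 2):
            "上周一" -> target_weekday=0, last week -> days_diff = 2 - 0 + 7 = 9,
            i.e. 9 days ago; "本周一" -> days_diff = 2, i.e. this week's Monday.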
"""
today = datetime.now()
current_weekday = today.weekday()
# 计算天数差
if is_last_week:
# 上周的某一天
days_diff = current_weekday - target_weekday + 7
else:
# 本周的某一天
days_diff = current_weekday - target_weekday
if days_diff < 0:
days_diff += 7
return today - timedelta(days=days_diff)
@staticmethod
def format_date_folder(date: datetime) -> str:
"""
将日期格式化为文件夹名称
Args:
date: datetime对象
Returns:
文件夹名称,格式: YYYY年MM月DD日
Examples:
>>> DateParser.format_date_folder(datetime(2025, 10, 11))
'2025年10月11日'
"""
return date.strftime("%Y年%m月%d日")
@staticmethod
def validate_date_not_future(date: datetime) -> None:
"""
验证日期不在未来
Args:
date: 待验证的日期
Raises:
InvalidParameterError: 日期在未来
"""
if date.date() > datetime.now().date():
raise InvalidParameterError(
f"不能查询未来的日期: {date.strftime('%Y-%m-%d')}",
suggestion="请使用今天或过去的日期"
)
@staticmethod
def validate_date_not_too_old(date: datetime, max_days: int = 365) -> None:
"""
验证日期不太久远
Args:
date: 待验证的日期
max_days: 最大天数
Raises:
InvalidParameterError: 日期太久远
"""
days_ago = (datetime.now().date() - date.date()).days
if days_ago > max_days:
raise InvalidParameterError(
f"日期太久远: {date.strftime('%Y-%m-%d')} ({days_ago}天前)",
suggestion=f"请查询{max_days}天内的数据"
)
|
2302_81331056/TrendRadar
|
mcp_server/utils/date_parser.py
|
Python
|
agpl-3.0
| 9,698
|
"""
自定义错误类
定义MCP Server使用的所有自定义异常类型。
"""
from typing import Optional
class MCPError(Exception):
"""MCP工具错误基类"""
def __init__(self, message: str, code: str = "MCP_ERROR", suggestion: Optional[str] = None):
super().__init__(message)
self.code = code
self.message = message
self.suggestion = suggestion
def to_dict(self) -> dict:
"""转换为字典格式"""
error_dict = {
"code": self.code,
"message": self.message
}
if self.suggestion:
error_dict["suggestion"] = self.suggestion
return error_dict
class DataNotFoundError(MCPError):
"""数据不存在错误"""
def __init__(self, message: str, suggestion: Optional[str] = None):
super().__init__(
message=message,
code="DATA_NOT_FOUND",
suggestion=suggestion or "请检查日期范围或等待爬取任务完成"
)
class InvalidParameterError(MCPError):
"""参数无效错误"""
def __init__(self, message: str, suggestion: Optional[str] = None):
super().__init__(
message=message,
code="INVALID_PARAMETER",
suggestion=suggestion or "请检查参数格式是否正确"
)
class ConfigurationError(MCPError):
"""配置错误"""
def __init__(self, message: str, suggestion: Optional[str] = None):
super().__init__(
message=message,
code="CONFIGURATION_ERROR",
suggestion=suggestion or "请检查配置文件是否正确"
)
class PlatformNotSupportedError(MCPError):
"""平台不支持错误"""
def __init__(self, platform: str):
super().__init__(
message=f"平台 '{platform}' 不受支持",
code="PLATFORM_NOT_SUPPORTED",
suggestion="支持的平台: zhihu, weibo, douyin, bilibili, baidu, toutiao, qq, 36kr, sspai, hellogithub, thepaper"
)
class CrawlTaskError(MCPError):
"""爬取任务错误"""
def __init__(self, message: str, suggestion: Optional[str] = None):
super().__init__(
message=message,
code="CRAWL_TASK_ERROR",
suggestion=suggestion or "请稍后重试或查看日志"
)
class FileParseError(MCPError):
"""文件解析错误"""
def __init__(self, file_path: str, reason: str):
super().__init__(
message=f"解析文件 {file_path} 失败: {reason}",
code="FILE_PARSE_ERROR",
suggestion="请检查文件格式是否正确"
)
|
2302_81331056/TrendRadar
|
mcp_server/utils/errors.py
|
Python
|
agpl-3.0
| 2,642
|
"""
参数验证工具
提供统一的参数验证功能。
"""
from datetime import datetime
from typing import List, Optional
import os
import yaml
from .errors import InvalidParameterError
from .date_parser import DateParser
def get_supported_platforms() -> List[str]:
"""
从 config.yaml 动态获取支持的平台列表
Returns:
平台ID列表
Note:
- 读取失败时返回空列表,允许所有平台通过(降级策略)
- 平台列表来自 config/config.yaml 中的 platforms 配置
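    Example (illustrative config shape, matching how trigger_crawl reads entries):
        platforms:
          - id: zhihu
            name: 知乎
          - id: weibo
        # -> get_supported_platforms() returns ['zhihu', 'weibo']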
"""
try:
# 获取 config.yaml 路径(相对于当前文件)
current_dir = os.path.dirname(os.path.abspath(__file__))
config_path = os.path.join(current_dir, "..", "..", "config", "config.yaml")
config_path = os.path.normpath(config_path)
with open(config_path, 'r', encoding='utf-8') as f:
config = yaml.safe_load(f)
platforms = config.get('platforms', [])
return [p['id'] for p in platforms if 'id' in p]
except Exception as e:
# 降级方案:返回空列表,允许所有平台
print(f"警告:无法加载平台配置 ({config_path}): {e}")
return []
def validate_platforms(platforms: Optional[List[str]]) -> List[str]:
"""
验证平台列表
Args:
platforms: 平台ID列表,None表示使用 config.yaml 中配置的所有平台
Returns:
验证后的平台列表
Raises:
InvalidParameterError: 平台不支持
Note:
- platforms=None 时,返回 config.yaml 中配置的平台列表
- 会验证平台ID是否在 config.yaml 的 platforms 配置中
- 配置加载失败时,允许所有平台通过(降级策略)
"""
supported_platforms = get_supported_platforms()
if platforms is None:
# 返回配置文件中的平台列表(用户的默认配置)
return supported_platforms if supported_platforms else []
if not isinstance(platforms, list):
raise InvalidParameterError("platforms 参数必须是列表类型")
if not platforms:
# 空列表时,返回配置文件中的平台列表
return supported_platforms if supported_platforms else []
# 如果配置加载失败(supported_platforms为空),允许所有平台通过
if not supported_platforms:
print("警告:平台配置未加载,跳过平台验证")
return platforms
# 验证每个平台是否在配置中
invalid_platforms = [p for p in platforms if p not in supported_platforms]
if invalid_platforms:
raise InvalidParameterError(
f"不支持的平台: {', '.join(invalid_platforms)}",
suggestion=f"支持的平台(来自config.yaml): {', '.join(supported_platforms)}"
)
return platforms
def validate_limit(limit: Optional[int], default: int = 20, max_limit: int = 1000) -> int:
"""
验证数量限制参数
Args:
limit: 限制数量
default: 默认值
max_limit: 最大限制
Returns:
验证后的限制值
Raises:
InvalidParameterError: 参数无效
"""
if limit is None:
return default
if not isinstance(limit, int):
raise InvalidParameterError("limit 参数必须是整数类型")
if limit <= 0:
raise InvalidParameterError("limit 必须大于0")
if limit > max_limit:
raise InvalidParameterError(
f"limit 不能超过 {max_limit}",
suggestion=f"请使用分页或降低limit值"
)
return limit
def validate_date(date_str: str) -> datetime:
"""
验证日期格式
Args:
date_str: 日期字符串 (YYYY-MM-DD)
Returns:
datetime对象
Raises:
InvalidParameterError: 日期格式错误
"""
try:
return datetime.strptime(date_str, "%Y-%m-%d")
except ValueError:
raise InvalidParameterError(
f"日期格式错误: {date_str}",
suggestion="请使用 YYYY-MM-DD 格式,例如: 2025-10-11"
)
def validate_date_range(date_range: Optional[dict]) -> Optional[tuple]:
"""
验证日期范围
Args:
date_range: 日期范围字典 {"start": "YYYY-MM-DD", "end": "YYYY-MM-DD"}
Returns:
(start_date, end_date) 元组,或 None
Raises:
InvalidParameterError: 日期范围无效
"""
if date_range is None:
return None
if not isinstance(date_range, dict):
raise InvalidParameterError("date_range 必须是字典类型")
start_str = date_range.get("start")
end_str = date_range.get("end")
if not start_str or not end_str:
raise InvalidParameterError(
"date_range 必须包含 start 和 end 字段",
suggestion='例如: {"start": "2025-10-01", "end": "2025-10-11"}'
)
start_date = validate_date(start_str)
end_date = validate_date(end_str)
if start_date > end_date:
raise InvalidParameterError(
"开始日期不能晚于结束日期",
suggestion=f"start: {start_str}, end: {end_str}"
)
# 检查日期是否在未来
today = datetime.now().date()
if start_date.date() > today or end_date.date() > today:
# 获取可用日期范围提示
try:
from ..services.data_service import DataService
data_service = DataService()
earliest, latest = data_service.get_available_date_range()
if earliest and latest:
available_range = f"{earliest.strftime('%Y-%m-%d')} 至 {latest.strftime('%Y-%m-%d')}"
else:
available_range = "无可用数据"
except Exception:
available_range = "未知(请检查 output 目录)"
future_dates = []
if start_date.date() > today:
future_dates.append(start_str)
if end_date.date() > today and end_str != start_str:
future_dates.append(end_str)
raise InvalidParameterError(
f"不允许查询未来日期: {', '.join(future_dates)}(当前日期: {today.strftime('%Y-%m-%d')})",
suggestion=f"当前可用数据范围: {available_range}"
)
return (start_date, end_date)
def validate_keyword(keyword: str) -> str:
"""
验证关键词
Args:
keyword: 搜索关键词
Returns:
处理后的关键词
Raises:
InvalidParameterError: 关键词无效
"""
if not keyword:
raise InvalidParameterError("keyword 不能为空")
if not isinstance(keyword, str):
raise InvalidParameterError("keyword 必须是字符串类型")
keyword = keyword.strip()
if not keyword:
raise InvalidParameterError("keyword 不能为空白字符")
if len(keyword) > 100:
raise InvalidParameterError(
"keyword 长度不能超过100个字符",
suggestion="请使用更简洁的关键词"
)
return keyword
def validate_top_n(top_n: Optional[int], default: int = 10) -> int:
"""
验证TOP N参数
Args:
top_n: TOP N数量
default: 默认值
Returns:
验证后的值
Raises:
InvalidParameterError: 参数无效
"""
return validate_limit(top_n, default=default, max_limit=100)
def validate_mode(mode: Optional[str], valid_modes: List[str], default: str) -> str:
"""
验证模式参数
Args:
mode: 模式字符串
valid_modes: 有效模式列表
default: 默认模式
Returns:
验证后的模式
Raises:
InvalidParameterError: 模式无效
"""
if mode is None:
return default
if not isinstance(mode, str):
raise InvalidParameterError("mode 必须是字符串类型")
if mode not in valid_modes:
raise InvalidParameterError(
f"无效的模式: {mode}",
suggestion=f"支持的模式: {', '.join(valid_modes)}"
)
return mode
def validate_config_section(section: Optional[str]) -> str:
"""
验证配置节参数
Args:
section: 配置节名称
Returns:
验证后的配置节
Raises:
InvalidParameterError: 配置节无效
"""
valid_sections = ["all", "crawler", "push", "keywords", "weights"]
return validate_mode(section, valid_sections, "all")
def validate_date_query(
date_query: str,
allow_future: bool = False,
max_days_ago: int = 365
) -> datetime:
"""
验证并解析日期查询字符串
Args:
date_query: 日期查询字符串
allow_future: 是否允许未来日期
max_days_ago: 允许查询的最大天数
Returns:
解析后的datetime对象
Raises:
InvalidParameterError: 日期查询无效
    Examples (假设今天是 2025-10-11):
>>> validate_date_query("昨天")
datetime(2025, 10, 10)
>>> validate_date_query("2025-10-10")
datetime(2025, 10, 10)
"""
if not date_query:
raise InvalidParameterError(
"日期查询字符串不能为空",
suggestion="请提供日期查询,如:今天、昨天、2025-10-10"
)
# 使用DateParser解析日期
parsed_date = DateParser.parse_date_query(date_query)
# 验证日期不在未来
if not allow_future:
DateParser.validate_date_not_future(parsed_date)
# 验证日期不太久远
DateParser.validate_date_not_too_old(parsed_date, max_days=max_days_ago)
return parsed_date
|
2302_81331056/TrendRadar
|
mcp_server/utils/validators.py
|
Python
|
agpl-3.0
| 9,637
|
#!/bin/bash
# 颜色定义
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
BOLD='\033[1m'
NC='\033[0m' # No Color
echo -e "${BOLD}╔════════════════════════════════════════╗${NC}"
echo -e "${BOLD}║ TrendRadar MCP 一键部署 (Mac) ║${NC}"
echo -e "${BOLD}╚════════════════════════════════════════╝${NC}"
echo ""
# 获取项目根目录
PROJECT_ROOT="$(cd "$(dirname "$0")" && pwd)"
echo -e "📍 项目目录: ${BLUE}${PROJECT_ROOT}${NC}"
echo ""
# 检查 UV 是否已安装
if ! command -v uv &> /dev/null; then
echo -e "${YELLOW}[1/3] 🔧 UV 未安装,正在自动安装...${NC}"
echo "提示: UV 是一个快速的 Python 包管理器,只需安装一次"
echo ""
curl -LsSf https://astral.sh/uv/install.sh | sh
echo ""
echo "正在刷新 PATH 环境变量..."
echo ""
    # 将 UV 常见安装位置加入 PATH(新版安装到 ~/.local/bin,旧版为 ~/.cargo/bin)
    export PATH="$HOME/.local/bin:$HOME/.cargo/bin:$PATH"
# 验证 UV 是否真正可用
if ! command -v uv &> /dev/null; then
echo -e "${RED}❌ [错误] UV 安装失败${NC}"
echo ""
echo "可能的原因:"
echo " 1. 网络连接问题,无法下载安装脚本"
echo " 2. 安装路径权限不足"
echo " 3. 安装脚本执行异常"
echo ""
echo "解决方案:"
echo " 1. 检查网络连接是否正常"
echo " 2. 手动安装: https://docs.astral.sh/uv/getting-started/installation/"
echo " 3. 或运行: curl -LsSf https://astral.sh/uv/install.sh | sh"
exit 1
fi
echo -e "${GREEN}✅ [成功] UV 已安装${NC}"
echo -e "${YELLOW}⚠️ 请重新运行此脚本以继续${NC}"
exit 0
else
echo -e "${GREEN}[1/3] ✅ UV 已安装${NC}"
uv --version
fi
echo ""
echo "[2/3] 📦 安装项目依赖..."
echo "提示: 这可能需要 1-2 分钟,请耐心等待"
echo ""
# 创建虚拟环境并安装依赖
uv sync
if [ $? -ne 0 ]; then
echo ""
echo -e "${RED}❌ [错误] 依赖安装失败${NC}"
echo "请检查网络连接后重试"
exit 1
fi
echo ""
echo -e "${GREEN}[3/3] ✅ 检查配置文件...${NC}"
echo ""
# 检查配置文件
if [ ! -f "config/config.yaml" ]; then
echo -e "${YELLOW}⚠️ [警告] 未找到配置文件: config/config.yaml${NC}"
echo "请确保配置文件存在"
echo ""
fi
# 添加执行权限
chmod +x start-http.sh 2>/dev/null || true
# 获取 UV 路径
UV_PATH=$(which uv)
echo ""
echo -e "${BOLD}╔════════════════════════════════════════╗${NC}"
echo -e "${BOLD}║ 部署完成! ║${NC}"
echo -e "${BOLD}╚════════════════════════════════════════╝${NC}"
echo ""
echo "📋 下一步操作:"
echo ""
echo " 1️⃣ 打开 Cherry Studio"
echo " 2️⃣ 进入 设置 > MCP Servers > 添加服务器"
echo " 3️⃣ 填入以下配置:"
echo ""
echo " 名称: TrendRadar"
echo " 描述: 新闻热点聚合工具"
echo " 类型: STDIO"
echo -e " 命令: ${BLUE}${UV_PATH}${NC}"
echo " 参数(每个占一行):"
echo -e " ${BLUE}--directory${NC}"
echo -e " ${BLUE}${PROJECT_ROOT}${NC}"
echo -e " ${BLUE}run${NC}"
echo -e " ${BLUE}python${NC}"
echo -e " ${BLUE}-m${NC}"
echo -e " ${BLUE}mcp_server.server${NC}"
echo ""
echo " 4️⃣ 保存并启用 MCP 开关"
echo ""
echo "📖 详细教程请查看: README-Cherry-Studio.md,本窗口别关,待会儿用于填入参数"
echo ""
|
2302_81331056/TrendRadar
|
setup-mac.sh
|
Shell
|
agpl-3.0
| 3,798
|
@echo off
setlocal enabledelayedexpansion
echo ==========================================
echo TrendRadar MCP Setup (Windows)
echo ==========================================
echo:
REM Fix: Use script location instead of current working directory
set "PROJECT_ROOT=%~dp0"
REM Remove trailing backslash
if "%PROJECT_ROOT:~-1%"=="\" set "PROJECT_ROOT=%PROJECT_ROOT:~0,-1%"
echo Project Directory: %PROJECT_ROOT%
echo:
REM Change to project directory
cd /d "%PROJECT_ROOT%"
if %errorlevel% neq 0 (
echo [ERROR] Cannot access project directory
pause
exit /b 1
)
REM Validate project structure
echo [0/4] Validating project structure...
if not exist "pyproject.toml" (
echo [ERROR] pyproject.toml not found in: %PROJECT_ROOT%
echo:
echo This should not happen! Please check:
echo 1. Is setup-windows.bat in the project root?
echo 2. Was the project properly cloned/downloaded?
echo:
echo Files in current directory:
dir /b
echo:
pause
exit /b 1
)
echo [OK] pyproject.toml found
echo:
REM Check Python
echo [1/4] Checking Python...
python --version >nul 2>&1
if %errorlevel% neq 0 (
echo [ERROR] Python not detected. Please install Python 3.10+
echo Download: https://www.python.org/downloads/
pause
exit /b 1
)
for /f "tokens=*" %%i in ('python --version') do echo [OK] %%i
echo:
REM Check UV
echo [2/4] Checking UV...
where uv >nul 2>&1
if %errorlevel% neq 0 (
echo UV not installed, installing automatically...
echo:
echo Trying installation method 1: PowerShell...
powershell -ExecutionPolicy Bypass -Command "try { irm https://astral.sh/uv/install.ps1 | iex; exit 0 } catch { Write-Host 'PowerShell method failed'; exit 1 }"
if %errorlevel% neq 0 (
echo:
echo Method 1 failed. Trying method 2: pip...
python -m pip install --upgrade uv
if %errorlevel% neq 0 (
echo:
echo [ERROR] Automatic installation failed
echo:
echo Please install UV manually using one of these methods:
echo:
echo Method 1 - pip:
echo python -m pip install uv
echo:
echo Method 2 - pipx:
echo pip install pipx
echo pipx install uv
echo:
echo Method 3 - Manual download:
echo Visit: https://docs.astral.sh/uv/getting-started/installation/
echo:
pause
exit /b 1
)
)
echo:
echo [SUCCESS] UV installed successfully!
echo:
echo [IMPORTANT] Please restart your terminal:
echo 1. Close this window
echo 2. Open a new Command Prompt
echo 3. Navigate to: %PROJECT_ROOT%
echo 4. Run: setup-windows.bat
echo:
pause
exit /b 0
) else (
for /f "tokens=*" %%i in ('uv --version') do echo [OK] %%i
)
echo:
echo [3/4] Installing dependencies...
echo Working directory: %PROJECT_ROOT%
echo:
REM Ensure we're in the project directory
cd /d "%PROJECT_ROOT%"
uv sync
if %errorlevel% neq 0 (
echo:
echo [ERROR] Dependency installation failed
echo:
echo Troubleshooting steps:
echo 1. Check your internet connection
echo 2. Verify Python version ^>= 3.10: python --version
echo 3. Try with verbose output: uv sync --verbose
echo 4. Check if pyproject.toml is valid
echo:
echo Project directory: %PROJECT_ROOT%
echo:
pause
exit /b 1
)
echo:
echo [OK] Dependencies installed successfully
echo:
echo [4/4] Checking configuration file...
if not exist "config\config.yaml" (
echo [WARNING] config\config.yaml not found
if exist "config\config.example.yaml" (
echo:
echo To create your configuration:
echo 1. Copy: copy config\config.example.yaml config\config.yaml
echo 2. Edit: notepad config\config.yaml
echo 3. Add your API keys
)
echo:
) else (
echo [OK] config\config.yaml exists
)
echo:
REM Get UV path
for /f "tokens=*" %%i in ('where uv 2^>nul') do set "UV_PATH=%%i"
if not defined UV_PATH (
set "UV_PATH=uv"
)
echo:
echo ==========================================
echo Setup Complete!
echo ==========================================
echo:
echo MCP Server Configuration for Claude Desktop:
echo:
echo Command: %UV_PATH%
echo Working Directory: %PROJECT_ROOT%
echo:
echo Arguments (one per line):
echo --directory
echo %PROJECT_ROOT%
echo run
echo python
echo -m
echo mcp_server.server
echo:
echo Configuration guide: README-Cherry-Studio.md
echo:
echo:
pause
|
2302_81331056/TrendRadar
|
setup-windows-en.bat
|
Batchfile
|
agpl-3.0
| 4,630
|
@echo off
chcp 65001 >nul
setlocal enabledelayedexpansion
echo ==========================================
echo TrendRadar MCP 一键部署 (Windows)
echo ==========================================
echo.
REM 修复:使用脚本所在目录,而不是当前工作目录
set "PROJECT_ROOT=%~dp0"
REM 移除末尾的反斜杠
if "%PROJECT_ROOT:~-1%"=="\" set "PROJECT_ROOT=%PROJECT_ROOT:~0,-1%"
echo 📍 项目目录: %PROJECT_ROOT%
echo.
REM 切换到项目目录
cd /d "%PROJECT_ROOT%"
if %errorlevel% neq 0 (
echo ❌ 无法访问项目目录
pause
exit /b 1
)
REM Verify project structure
echo [0/4] 🔍 Verifying project structure...
if not exist "pyproject.toml" (
    echo ❌ pyproject.toml not found: %PROJECT_ROOT%
echo.
    echo Please check:
    echo 1. Is setup-windows.bat in the project root directory?
    echo 2. Are the project files complete?
echo.
    echo Current directory contents:
dir /b
echo.
pause
exit /b 1
)
echo ✅ pyproject.toml found
echo.
REM Check Python
echo [1/4] 🐍 Checking Python...
python --version >nul 2>&1
if %errorlevel% neq 0 (
    echo ❌ Python not detected. Please install Python 3.10+ first
    echo Download: https://www.python.org/downloads/
pause
exit /b 1
)
for /f "tokens=*" %%i in ('python --version') do echo ✅ %%i
echo.
REM Check UV
echo [2/4] 🔧 Checking UV...
where uv >nul 2>&1
if %errorlevel% neq 0 (
    echo UV is not installed, installing automatically...
echo.
    echo Trying method 1: PowerShell...
    powershell -ExecutionPolicy Bypass -Command "try { irm https://astral.sh/uv/install.ps1 | iex; exit 0 } catch { Write-Host 'PowerShell method failed'; exit 1 }"
if %errorlevel% neq 0 (
echo.
        echo Method 1 failed, trying method 2: pip...
python -m pip install --upgrade uv
if %errorlevel% neq 0 (
echo.
            echo ❌ Automatic installation failed
echo.
            echo Please install UV manually using one of these methods:
echo.
            echo Method 1 - pip:
echo python -m pip install uv
echo.
            echo Method 2 - pipx:
echo pip install pipx
echo pipx install uv
echo.
            echo Method 3 - Manual download:
            echo Visit: https://docs.astral.sh/uv/getting-started/installation/
echo.
pause
exit /b 1
)
)
echo.
    echo ✅ UV installed successfully!
echo.
    echo ⚠️ Important: please follow these steps:
    echo 1. Close this window
    echo 2. Open a new Command Prompt or PowerShell
    echo 3. Return to the project directory: %PROJECT_ROOT%
    echo 4. Run this script again: setup-windows.bat
echo.
pause
exit /b 0
) else (
for /f "tokens=*" %%i in ('uv --version') do echo ✅ %%i
)
echo.
echo [3/4] 📦 Installing project dependencies...
echo Working directory: %PROJECT_ROOT%
echo.
REM Ensure we run from the project directory
cd /d "%PROJECT_ROOT%"
uv sync
if %errorlevel% neq 0 (
echo.
    echo ❌ Dependency installation failed
echo.
    echo Possible causes:
    echo 1. Network connection problems
    echo 2. Incompatible Python version, requires ^>= 3.10
    echo 3. Malformed pyproject.toml
echo.
    echo Troubleshooting:
    echo - Check your network connection
    echo - Verify the Python version: python --version
    echo - Try verbose output: uv sync --verbose
echo.
    echo Project directory: %PROJECT_ROOT%
echo.
pause
exit /b 1
)
echo.
echo ✅ Dependencies installed successfully
echo.
echo [4/4] ⚙️ Checking configuration file...
if not exist "config\config.yaml" (
    echo ⚠️ Configuration file not found: config\config.yaml
if exist "config\config.example.yaml" (
echo.
        echo To create the configuration file:
        echo 1. Copy: copy config\config.example.yaml config\config.yaml
        echo 2. Edit: notepad config\config.yaml
        echo 3. Fill in your API keys
)
echo.
) else (
    echo ✅ config\config.yaml already exists
)
echo.
REM Get the UV path
for /f "tokens=*" %%i in ('where uv 2^>nul') do set "UV_PATH=%%i"
if not defined UV_PATH (
set "UV_PATH=uv"
)
echo.
echo ==========================================
echo Setup Complete!
echo ==========================================
echo.
echo 📋 MCP Server Configuration (for Claude Desktop):
echo.
echo Command: %UV_PATH%
echo Working directory: %PROJECT_ROOT%
echo.
echo Arguments (one per line):
echo --directory
echo %PROJECT_ROOT%
echo run
echo python
echo -m
echo mcp_server.server
echo.
echo 📖 Detailed guide: README-Cherry-Studio.md
echo.
echo.
pause
|
2302_81331056/TrendRadar
|
setup-windows.bat
|
Batchfile
|
agpl-3.0
| 4,625
|
@echo off
chcp 65001 >nul
echo ╔════════════════════════════════════════╗
echo ║   TrendRadar MCP Server (HTTP Mode)   ║
echo ╚════════════════════════════════════════╝
echo.
REM Check the virtual environment
if not exist ".venv\Scripts\python.exe" (
    echo ❌ [Error] Virtual environment not found
    echo Please run setup-windows.bat or setup-windows-en.bat first
echo.
pause
exit /b 1
)
echo [Mode] HTTP (suitable for remote access)
echo [Address] http://localhost:3333/mcp
echo [Tip] Press Ctrl+C to stop the server
echo.
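REM Note: --host 0.0.0.0 binds all network interfaces so other machines can
REM reach the server; use 127.0.0.1 instead to restrict it to local access.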
uv run python -m mcp_server.server --transport http --host 0.0.0.0 --port 3333
pause
|
2302_81331056/TrendRadar
|
start-http.bat
|
Batchfile
|
agpl-3.0
| 786
|
#!/bin/bash
echo "╔════════════════════════════════════════╗"
echo "║ TrendRadar MCP Server (HTTP 模式) ║"
echo "╚════════════════════════════════════════╝"
echo ""
# Check the virtual environment
if [ ! -d ".venv" ]; then
echo "❌ [错误] 虚拟环境未找到"
echo "请先运行 ./setup-mac.sh 进行部署"
echo ""
exit 1
fi
echo "[模式] HTTP (适合远程访问)"
echo "[地址] http://localhost:3333/mcp"
echo "[提示] 按 Ctrl+C 停止服务"
echo ""
uv run python -m mcp_server.server --transport http --host 0.0.0.0 --port 3333
|
2302_81331056/TrendRadar
|
start-http.sh
|
Shell
|
agpl-3.0
| 729
|
import tkinter as tk
from tkinter import messagebox
root = tk.Tk()
root.geometry('400x200')
root.title('Login')
username = tk.StringVar()
password = tk.StringVar()
page = tk.Frame(root)
page.pack()
tk.Label(page).grid(row=0, column=0)
tk.Label(page, text='Account:').grid(row=1, column=1)
tk.Entry(page, textvariable=username).grid(row=1, column=2)
tk.Label(page, text='Password:').grid(row=2, column=1, pady=11)
tk.Entry(page, textvariable=password).grid(row=2, column=2)
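# Demo-only authentication: the credentials are hard-coded in login() below.
# A real application would verify against a user store and mask the password
# field (e.g. tk.Entry(..., show='*')).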
def login():
    name = username.get()
    pwd = password.get()
    if name == 'admin' and pwd == '12345':
        print('Login successful')
    else:
        messagebox.showwarning(title='Warning', message='Login failed, please check that the account and password are correct')
tk.Button(page, text='Login', command=login).grid(row=3, column=1, pady=11)
tk.Button(page, text='Quit', command=page.quit).grid(row=3, column=2)
root.mainloop()
|
2303_77931001/pythonGUI
|
LoginPage.py
|
Python
|
unknown
| 929
|
package dao;
import entiy.Cart;
import entiy.CartProduct;
import java.util.List;
public interface CartDao {
/**
     * Add an item to the cart
*/
int addCart(Cart cart);
/**
     * Update a cart entry
*/
int updateCart(Cart cart);
/**
     * Get a cart entry by product id and user
*/
Cart getCartById(Cart cart);
/**
     * Get all cart entries for the current user
*
* @param cartProduct
* @return
*/
List<CartProduct> getCart(CartProduct cartProduct);
/**
     * Increase or decrease the quantity of a cart entry
*/
int addAndred(String num ,String cid);
/**
     * Empty the cart
*/
int cartDel(String uid);
/**
     * Left-side menu pagination
*/
// List<TAddress> queryAddress(TUser user, int nowPage, int pageSize);
}
|
2303_76674713/shopping
|
src/main/java/dao/CartDao.java
|
Java
|
unknown
| 822
|
package dao;
import entiy.Order;
import java.util.List;
public interface OrderDao {
/**
     * Add an order and return the auto-generated primary key
*
* @param order
* @return
*/
int addOrder(Order order);
/**
     * Query orders by conditions, with pagination
*
* @param
* @return
*/
List<Order> OrderAll(Order order, int nowPage, int pageSize);
}
|
2303_76674713/shopping
|
src/main/java/dao/OrderDao.java
|
Java
|
unknown
| 384
|
package dao;
import entiy.OrderDetails;
import java.util.List;
public interface OrderDetailsDao {
void addOrderDetails(List<OrderDetails>OrderDetails);
List<OrderDetails> getByIds(String ids);
}
|
2303_76674713/shopping
|
src/main/java/dao/OrderDetailsDao.java
|
Java
|
unknown
| 209
|
package dao;
import entiy.Product;
import java.util.List;
public interface ProductDao {
    // Query all products
List<Product> selectProduct();
    // Insert or update
int save(Product product);
    // Pagination with multiple filter conditions
List<Product> All(Product product, int nowPage, int pageSize);
    // Delete
int del(String id);
    // Update product status
int updateproductStatus(String type, String id);
}
|
2303_76674713/shopping
|
src/main/java/dao/ProductDao.java
|
Java
|
unknown
| 415
|
package dao;
import entiy.TAddress;
import entiy.TUser;
import java.util.List;
public interface TAddressDao {
/**
     * List a user's addresses, with pagination
* @param user
* @return
*/
List<TAddress> queryAddress(TUser user,int nowPage,int pageSize);
/**
     * Add a user address
* @return
*/
int add(String addressProvince,String addressCity,String addressDistrict,String addressDescribe,int id);
/**
     * Delete a user address
* @return
*/
int del(String id);
/**
     * Update a user address
* @return
*/
int update(String addressProvince,String addressCity,String addressDistrict,String addressDescribe,String id);
}
|
2303_76674713/shopping
|
src/main/java/dao/TAddressDao.java
|
Java
|
unknown
| 703
|
package dao;
import entiy.TCategory;
import java.util.List;
public interface TCategoryDao {
/**
     * List all categories, with pagination
* @param
* @return
*/
List<TCategory> queryAddress(TCategory tcategory, int nowPage, int pageSize);
/**
     * Add a category
* @return
*/
int add(String name,String id);
/**
     * Delete a category
* @return
* @param id
*/
int del(String id);
/**
     * Update a category
* @return
*/
int update(String name, String id);
}
|
2303_76674713/shopping
|
src/main/java/dao/TCategoryDao.java
|
Java
|
unknown
| 526
|
package dao;
import entiy.TUser;
import entiy.User;
import java.util.List;
public interface TUserDao {
/**
*
     * Query all TUser records
*/
List<TUser> selectAll();
/**
     * User login
*/
TUser login(TUser user);
/**
     * User registration
* @return
*/
int register(TUser tUser);
/**
     * Update name/sex
*/
int updateUserNameandSex(TUser user);
/**
     * Update password
*/
int updateUserPassword(TUser user);
/**
     * Query by conditions, with pagination
*/
List<User> All(User user, int nowPage, int pageSize);
/**
     * Update user status
*/
int updateUserStatus(String type,String id);
}
|
2303_76674713/shopping
|
src/main/java/dao/TUserDao.java
|
Java
|
unknown
| 705
|
package dao.imp;
import dao.CartDao;
import entiy.Cart;
import entiy.CartProduct;
import org.springframework.jdbc.core.BeanPropertyRowMapper;
import org.springframework.jdbc.core.JdbcTemplate;
import untils.DataSourceUtil;
import java.util.List;
public class CartDaoImpl implements CartDao {
private JdbcTemplate jdbcTemplate = new JdbcTemplate(DataSourceUtil.getDataSource());
@Override
public int addCart(Cart cart) {
try {
return jdbcTemplate.update("insert into t_cart(product_id,product_num,user_id) values(?,?,?)", cart.getProductId(), cart.getProductNum(), cart.getUserId());
} catch (Exception e) {
return 0;
}
}
@Override
public int updateCart(Cart cart) {
try {
return jdbcTemplate.update("update t_cart set product_num=? where cart_id=? ", cart.getProductNum(), cart.getCartId());
} catch (Exception e) {
return 0;
}
}
@Override
public Cart getCartById(Cart cart) {
try {
return jdbcTemplate.queryForObject("select * from t_cart where product_id = ? and user_id = ?", new BeanPropertyRowMapper<Cart>(Cart.class), cart.getProductId(), cart.getUserId());
} catch (Exception e) {
return null;
}
}
@Override
public List<CartProduct> getCart(CartProduct cartProduct) {
String sql = "select c.*,p.product_name,p.product_pic,p.product_price from t_cart c join t_product p on c.product_id = p.product_id where c.user_id = ?";
try {
return jdbcTemplate.query(sql, new BeanPropertyRowMapper<CartProduct>(CartProduct.class), cartProduct.getUserId());
} catch (Exception e) {
return null;
}
}
@Override
public int addAndred(String num, String cid) {
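        // A quantity of "0" is treated as removing the entry from the cart;
        // any other value overwrites the stored quantity.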
try {
if ("0".equals(num)) {
return jdbcTemplate.update("delete from t_cart where cart_id=?", cid);
} else {
return jdbcTemplate.update("update t_cart set product_num=? where cart_id=? ", num, cid);
}
} catch (Exception e) {
return 0;
}
}
@Override
public int cartDel(String uid) {
try {
return jdbcTemplate.update("delete from t_cart where user_id=?",uid);
}catch (Exception e){
return 0;
}
}
}
|
2303_76674713/shopping
|
src/main/java/dao/imp/CartDaoImpl.java
|
Java
|
unknown
| 2,385
|
package dao.imp;
import dao.OrderDao;
import entiy.Order;
import org.springframework.jdbc.core.BeanPropertyRowMapper;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.PreparedStatementCreator;
import org.springframework.jdbc.support.GeneratedKeyHolder;
import org.springframework.jdbc.support.KeyHolder;
import untils.DataSourceUtil;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
public class OrderDaoImpl implements OrderDao {
private JdbcTemplate jdbcTemplate = new JdbcTemplate(DataSourceUtil.getDataSource());
@Override
public int addOrder(Order order) {
Object[] params = new Object[]{
order.getUserId(),
order.getAddressDetails(),
};
String sql = "insert into t_order(order_id,user_id,address_details) values(?,?,?)";
KeyHolder keyHolder = new GeneratedKeyHolder();
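        // GeneratedKeyHolder captures the auto-increment order_id assigned by
        // the database; Statement.RETURN_GENERATED_KEYS below asks the driver
        // to return it after the insert.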
jdbcTemplate.update(new PreparedStatementCreator() {
@Override
public PreparedStatement createPreparedStatement(Connection connection) throws SQLException {
PreparedStatement ps = connection.prepareStatement(sql, Statement.RETURN_GENERATED_KEYS);
int i = 1;
                // Pass null for the auto-increment primary key
ps.setObject(i, null);
for (Object p : params) {
i++;
                    // Using Object allows null parameters to be bound
ps.setObject(i, p);
}
return ps;
}
}, keyHolder);
        // Return the generated primary key id
return keyHolder.getKey().intValue();
}
@Override
public List<Order> OrderAll(Order order, int nowPage, int pageSize) {
        // Join the order table with the user table
String sql = "select u.user_name,o.* from t_order o join t_user u on o.user_id=u.user_id where 1=1";
List<String> str = new ArrayList<String>();
        // Optional filter conditions
if (order.getUserId() != null) {
sql += " and o.user_id = ?";
str.add(order.getUserId() + "");
}
if (order.getAddTime() != null && !"".equals(order.getAddTime())) {
sql += " and o.add_time >= ?";
str.add(order.getAddTime());
}
if (order.getAddTimeEnd() != null && !"".equals(order.getAddTimeEnd())) {
sql += " and o.add_time <= ?";
str.add(order.getAddTimeEnd());
}
if (order.getOrderType() != null && order.getOrderType() != 0) {
sql += " and o.order_type = ?";
str.add(order.getOrderType() + "");
}
if (order.getOrderId() != null && order.getOrderId() != 0) {
sql += " and o.order_id = ?";
str.add(order.getOrderId() + "");
}
sql += " order by o.order_id desc";
if (nowPage != 0) {
sql += " limit " + (nowPage - 1) * pageSize + "," + pageSize;
}
try {
return jdbcTemplate.query(sql, new BeanPropertyRowMapper<Order>(Order.class), str.toArray());
} catch (Exception e) {
e.printStackTrace();
return null;
}
}
    // Test (commented out)
// public static void main(String[] args) {
// OrderDaoImpl dao = new OrderDaoImpl();
// Order order = new Order();
// order.setOrderType(1);
// List<Order> orders = dao.OrderAll(order, 1, 10);
// for (Order or:orders){
// System.out.println(or);
// }
// }
}
|
2303_76674713/shopping
|
src/main/java/dao/imp/OrderDaoImpl.java
|
Java
|
unknown
| 3,622
|
package dao.imp;
import dao.OrderDetailsDao;
import entiy.OrderDetails;
import org.springframework.jdbc.core.BatchPreparedStatementSetter;
import org.springframework.jdbc.core.BeanPropertyRowMapper;
import org.springframework.jdbc.core.JdbcTemplate;
import untils.DataSourceUtil;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.List;
public class OrderDetailsDaoImp implements OrderDetailsDao {
private JdbcTemplate jdbcTemplate = new JdbcTemplate(DataSourceUtil.getDataSource());
@Override
public void addOrderDetails(List<OrderDetails> OrderDetails) {
final List<OrderDetails> orderDetails = OrderDetails;
String sql = "insert into t_order_details(product_name,product_num,product_money,order_id) values(?,?,?,?)";
jdbcTemplate.batchUpdate(sql, new BatchPreparedStatementSetter() {
@Override
public int getBatchSize() {
return orderDetails.size();
}
@Override
public void setValues(PreparedStatement ps, int i)
throws SQLException {
ps.setString(1, orderDetails.get(i).getProductName());
ps.setInt(2, orderDetails.get(i).getProductNum());
ps.setDouble(3, orderDetails.get(i).getProductMoney());
ps.setInt(4, orderDetails.get(i).getOrderId());
}
});
}
@Override
public List<OrderDetails> getByIds(String ids) {
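        // "ids" is expected to already be a parenthesized id list such as
        // "(1,2,3)" - an assumption implied by the string concatenation below.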
try {
return jdbcTemplate.query("select * from t_order_details where order_id in"+ids, new BeanPropertyRowMapper<OrderDetails>(OrderDetails.class));
} catch (Exception e) {
e.printStackTrace();
return null;
}
}
}
|
2303_76674713/shopping
|
src/main/java/dao/imp/OrderDetailsDaoImp.java
|
Java
|
unknown
| 1,759
|
package dao.imp;
import dao.ProductDao;
import entiy.Product;
import org.springframework.jdbc.core.BeanPropertyRowMapper;
import org.springframework.jdbc.core.JdbcTemplate;
import untils.DataSourceUtil;
import java.util.ArrayList;
import java.util.List;
public class ProductDaoImp implements ProductDao {
private JdbcTemplate jdbcTemplate = new JdbcTemplate(DataSourceUtil.getDataSource());
@Override
public List<Product> selectProduct() {
try {
return jdbcTemplate.query("select * from t_product", new BeanPropertyRowMapper<Product>(Product.class));
} catch (Exception e) {
return null;
}
}
@Override
public int save(Product product) {
try {
if(product.getProductId() != null){
return jdbcTemplate.update("update t_product set product_name=?,product_pic=?,product_price=?,product_describe=?,category_id=? where product_id=?",product.getProductName(),product.getProductPic(),product.getProductPrice(),product.getProductDescribe(),product.getCategoryId(),product.getProductId());
}else{
return jdbcTemplate.update("insert into t_product(product_name,product_pic,product_price,product_describe,category_id) values(?,?,?,?,?)",product.getProductName(),product.getProductPic(),product.getProductPrice(),product.getProductDescribe(),product.getCategoryId());
}
} catch (Exception e) {
return 0;
}
}
@Override
public List<Product> All(Product product, int nowPage, int pageSize) {
try {
String sql = "select * from t_product where 1=1 ";
            // Collected query parameters
ArrayList<String> str = new ArrayList<>();
            // Filter conditions
if(product.getProductName() != null && !"".equals(product.getProductName())){
sql +="and product_name like ? ";
str.add("%"+product.getProductName()+"%");
}
if(product.getCategoryId() != null && !"".equals(product.getCategoryId())){
sql +="and category_id= ? ";
str.add(product.getCategoryId());
}
if (product.getProductStatus()!=null){
sql +="and product_status= ? ";
str.add(product.getProductStatus());
}
sql += " order by product_id desc";
            // Pagination
if(nowPage != 0){
sql += " limit "+(nowPage-1)*pageSize+","+pageSize;
}
return jdbcTemplate.query(sql, new BeanPropertyRowMapper<Product>(Product.class),str.toArray());
} catch (Exception e) {
return null;
}
}
@Override
public int del(String id) {
try {
return jdbcTemplate.update("delete from t_product where product_id=?",id);
}catch (Exception e){
return 0;
}
}
@Override
public int updateproductStatus(String type, String id) {
try {
return jdbcTemplate.update("update t_product set product_status=? where product_id=?",type, id);
} catch (Exception e) {
return 0;
}
}
}
|
2303_76674713/shopping
|
src/main/java/dao/imp/ProductDaoImp.java
|
Java
|
unknown
| 3,188
|
package dao.imp;
import dao.TAddressDao;
import entiy.TAddress;
import entiy.TUser;
import org.springframework.jdbc.core.BeanPropertyRowMapper;
import org.springframework.jdbc.core.JdbcTemplate;
import untils.DataSourceUtil;
import java.util.ArrayList;
import java.util.List;
public class TAddressDaoImp implements TAddressDao {
private JdbcTemplate jdbcTemplate = new JdbcTemplate(DataSourceUtil.getDataSource());
@Override
public List<TAddress> queryAddress(TUser user, int nowPage, int pageSize) {
try {
String sql = "select * from t_address where 1=1 ";
ArrayList<String> str = new ArrayList<>();
            if (user.getUserId() != 0) {
sql += "and user_id=?";
str.add(String.valueOf(user.getUserId()));
}
if (nowPage != 0) {
sql += "limit " + (nowPage - 1) * pageSize + "," + pageSize;
}
return jdbcTemplate.query(sql, new BeanPropertyRowMapper<TAddress>(TAddress.class), str.toArray());
} catch (Exception e) {
return null;
}
}
@Override
public int add(String addressProvince, String addressCity, String addressDistrict, String addressDescribe, int id) {
try {
return jdbcTemplate.update("insert into t_address(address_province,address_city,address_district,address_describe,user_id) values (?,?,?,?,?)", addressProvince, addressCity, addressDistrict, addressDescribe, id);
} catch (Exception e) {
return 0;
}
}
@Override
public int del(String id) {
try {
return jdbcTemplate.update("delete from t_address where address_id=?", id);
} catch (Exception e) {
return 0;
}
}
@Override
public int update(String addressProvince, String addressCity, String addressDistrict, String addressDescribe, String id) {
try {
return jdbcTemplate.update("update t_address set address_province=?,address_city=?,address_district=?,address_describe=? where address_id=?", addressProvince, addressCity, addressDistrict, addressDescribe, id);
} catch (Exception e) {
return 0;
}
}
    // Address pagination (commented out)
// @Override
// public List<TAddress> sAll(TAddress tAddress, int nowPage, int pageSize) {
// try {
// String sql = "select * from t_address where user_id=? ";
//
//            // Pagination
// if(nowPage != 0){
// sql += "limit "+(nowPage-1)*pageSize+","+pageSize;
// }
// return jdbcTemplate.query(sql, new BeanPropertyRowMapper<TAddress>(TAddress.class),tAddress.getUserId());
// } catch (Exception e) {
// return null;
// }
// }
}
|
2303_76674713/shopping
|
src/main/java/dao/imp/TAddressDaoImp.java
|
Java
|
unknown
| 2,815
|
package dao.imp;
import dao.TCategoryDao;
import entiy.TCategory;
import org.springframework.jdbc.core.BeanPropertyRowMapper;
import org.springframework.jdbc.core.JdbcTemplate;
import untils.DataSourceUtil;
import java.util.ArrayList;
import java.util.List;
public class TCategoryDaoImp implements TCategoryDao {
private JdbcTemplate jdbcTemplate = new JdbcTemplate(DataSourceUtil.getDataSource());
@Override
public List<TCategory> queryAddress(TCategory tcategory, int nowPage, int pageSize) {
try {
String sql = "select * from t_category where 1=1 ";
ArrayList<String> str = new ArrayList<>();
if (tcategory.getCategoryId() != null && !"".equals(tcategory.getCategoryId())) {
sql += "and category_id=?";
str.add(String.valueOf(tcategory.getCategoryId()));
}
if (nowPage != 0) {
sql += "limit " + (nowPage - 1) * pageSize + "," + pageSize;
}
return jdbcTemplate.query(sql, new BeanPropertyRowMapper<TCategory>(TCategory.class), str.toArray());
} catch (Exception e) {
return null;
}
}
@Override
public int add(String name, String id) {
try {
return jdbcTemplate.update("insert into t_category(category_name,category_id) values (?,?)", name, id);
} catch (Exception e) {
return 0;
}
}
@Override
public int del(String id) {
try {
return jdbcTemplate.update("delete from t_category where category_id=?", id);
} catch (Exception e) {
return 0;
}
}
@Override
public int update(String name, String id) {
try {
return jdbcTemplate.update("update t_category set category_name=?where category_id=?", name, id);
} catch (Exception e) {
return 0;
}
}
}
|
2303_76674713/shopping
|
src/main/java/dao/imp/TCategoryDaoImp.java
|
Java
|
unknown
| 1,914
|
package dao.imp;
import dao.TUserDao;
import entiy.TUser;
import entiy.User;
import org.springframework.jdbc.core.BeanPropertyRowMapper;
import org.springframework.jdbc.core.JdbcTemplate;
import untils.DataSourceUtil;
import java.util.ArrayList;
import java.util.List;
public class TUserDaoImp implements TUserDao {
private JdbcTemplate jdbcTemplate = new JdbcTemplate(DataSourceUtil.getDataSource());
@Override
public List<TUser> selectAll() {
try {
return jdbcTemplate.query("select * from t_user",new BeanPropertyRowMapper<TUser>(TUser.class));
}catch (Exception e){
return null;
}
}
@Override
public TUser login(TUser user) {
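        // Looks the user up by phone number only; the password check is
        // presumably done by the calling service layer (assumption).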
try {
return jdbcTemplate.queryForObject("select * from t_user where user_tel=?", new BeanPropertyRowMapper<TUser>(TUser.class),user.getUserTel());
}catch (Exception e){
return null;
}
}
@Override
public int register(TUser user) {
try {
return jdbcTemplate.update("insert into t_user(user_tel,user_pwd,user_name,user_sex) values(?,?,?,?)", user.getUserTel(), user.getUserPwd(), user.getUserName(), user.getUserSex());
} catch (Exception e) {
return 0;
}
}
@Override
public int updateUserNameandSex(TUser user) {
try {
return jdbcTemplate.update("update t_user set user_name=?,user_sex=? where user_id=?", user.getUserName(), user.getUserSex(), user.getUserId());
} catch (Exception e) {
return 0;
}
}
@Override
public int updateUserPassword(TUser user) {
try {
return jdbcTemplate.update("update t_user set user_pwd=? where user_id=?", user.getUserPwd(), user.getUserId());
} catch (Exception e) {
return 0;
}
}
@Override
public List<User> All(User user, int nowPage, int pageSize) {
try {
String sql = "select * from t_user where 1=1 ";
            // Collected query parameters
ArrayList<String> str = new ArrayList<>();
            // Filter conditions
if(user.getUserTel() != null && !"".equals(user.getUserTel())){
sql +="and user_tel = ? ";
str.add(user.getUserTel());
}
if(user.getUserName() != null && !"".equals(user.getUserName())){
sql +="and user_name like ? ";
str.add("%"+user.getUserName()+"%");
}
if(user.getAddTime() != null && !"".equals(user.getAddTime())){
sql +="and add_time >= ? ";
str.add(user.getAddTime());
}
if(user.getAddTimeEnd() != null && !"".equals(user.getAddTimeEnd())){
sql +="and add_time <= ? ";
str.add(user.getAddTimeEnd());
}
            // Pagination
if(nowPage != 0){
sql += "limit "+(nowPage-1)*pageSize+","+pageSize;
}
return jdbcTemplate.query(sql, new BeanPropertyRowMapper<User>(User.class),str.toArray());
} catch (Exception e) {
return null;
}
}
@Override
public int updateUserStatus(String type,String id) {
try {
return jdbcTemplate.update("update t_user set user_status=? where user_id=?",type, id);
} catch (Exception e) {
return 0;
}
}
}
|
2303_76674713/shopping
|
src/main/java/dao/imp/TUserDaoImp.java
|
Java
|
unknown
| 3,427
|
package entiy;
public class Cart {
private long cartId;
private long productId;
private long productNum;
private long userId;
public Cart() {
}
public Cart(long cartId, long productId, long productNum, long userId) {
this.cartId = cartId;
this.productId = productId;
this.productNum = productNum;
this.userId = userId;
}
public long getCartId() {
return cartId;
}
public void setCartId(long cartId) {
this.cartId = cartId;
}
public long getProductId() {
return productId;
}
public void setProductId(long productId) {
this.productId = productId;
}
public long getProductNum() {
return productNum;
}
public void setProductNum(long productNum) {
this.productNum = productNum;
}
public long getUserId() {
return userId;
}
public void setUserId(long userId) {
this.userId = userId;
}
@Override
public String toString() {
return "Cart{" +
"cartId=" + cartId +
", productId=" + productId +
", productNum=" + productNum +
", userId=" + userId +
'}';
}
}
|
2303_76674713/shopping
|
src/main/java/entiy/Cart.java
|
Java
|
unknown
| 1,130
|
package entiy;
public class CartProduct extends Cart {
private String productName;
private String productPic;
private double productPrice;
public String getProductName() {
return productName;
}
public void setProductName(String productName) {
this.productName = productName;
}
public String getProductPic() {
return productPic;
}
public void setProductPic(String productPic) {
this.productPic = productPic;
}
public double getProductPrice() {
return productPrice;
}
public void setProductPrice(double productPrice) {
this.productPrice = productPrice;
}
@Override
public String toString() {
return "CartProduct{" +
"productName='" + productName + '\'' +
", productPic='" + productPic + '\'' +
", productPrice=" + productPrice +
'}';
}
}
|
2303_76674713/shopping
|
src/main/java/entiy/CartProduct.java
|
Java
|
unknown
| 941
|
package entiy;
import java.util.List;
public class Order {
private String userName;
private Integer orderId;
private String addTime;
private String addTimeEnd;
private String updateTime;
private Integer userId;
private String addressDetails;
private Integer orderType;
private Double sum;
private List<OrderDetails> orderDetails;
public Order() {
}
public List<OrderDetails> getOrderDetails() {
return orderDetails;
}
public String getUserName() {
return userName;
}
public void setUserName(String userName) {
this.userName = userName;
}
public String getAddTimeEnd() {
return addTimeEnd;
}
public void setAddTimeEnd(String addTimeEnd) {
this.addTimeEnd = addTimeEnd;
}
public Double getSum() {
return sum;
}
public void setSum(Double sum) {
this.sum = sum;
}
public void setOrderDetails(List<OrderDetails> orderDetails) {
this.orderDetails = orderDetails;
}
public Order(Integer orderId, String addTime, String updateTime, Integer userId, String addressDetails, Integer orderType) {
this.orderId = orderId;
this.addTime = addTime;
this.updateTime = updateTime;
this.userId = userId;
this.addressDetails = addressDetails;
this.orderType = orderType;
}
@Override
public String toString() {
return "Order{" +
"userName='" + userName + '\'' +
", orderId=" + orderId +
", addTime='" + addTime + '\'' +
", addTimeEnd='" + addTimeEnd + '\'' +
", updateTime='" + updateTime + '\'' +
", userId=" + userId +
", addressDetails='" + addressDetails + '\'' +
", orderType=" + orderType +
", sum=" + sum +
", orderDetails=" + orderDetails +
'}';
}
public Integer getOrderId() {
return orderId;
}
public void setOrderId(Integer orderId) {
this.orderId = orderId;
}
public String getAddTime() {
return addTime;
}
public void setAddTime(String addTime) {
this.addTime = addTime;
}
public String getUpdateTime() {
return updateTime;
}
public void setUpdateTime(String updateTime) {
this.updateTime = updateTime;
}
public Integer getUserId() {
return userId;
}
public void setUserId(Integer userId) {
this.userId = userId;
}
public String getAddressDetails() {
return addressDetails;
}
public void setAddressDetails(String addressDetails) {
this.addressDetails = addressDetails;
}
public Integer getOrderType() {
return orderType;
}
public void setOrderType(Integer orderType) {
this.orderType = orderType;
}
public Order(Integer userId, String addressDetails) {
this.userId = userId;
this.addressDetails = addressDetails;
}
}
|
2303_76674713/shopping
|
src/main/java/entiy/Order.java
|
Java
|
unknown
| 3,084
|
package entiy;
public class OrderDetails {
private Integer orderDetailsId;
private String productName;
private Integer productNum;
private Double productMoney;
private Integer orderId;
public OrderDetails() {
}
public OrderDetails(Integer orderDetailsId, String productName, Integer productNum, Double productMoney, Integer orderId) {
this.orderDetailsId = orderDetailsId;
this.productName = productName;
this.productNum = productNum;
this.productMoney = productMoney;
this.orderId = orderId;
}
public OrderDetails(String productName, Integer productNum, Double productMoney, Integer orderId) {
this.productName = productName;
this.productNum = productNum;
this.productMoney = productMoney;
this.orderId = orderId;
}
@Override
public String toString() {
return "OrderDetails{" +
"orderDetailsId=" + orderDetailsId +
", productName='" + productName + '\'' +
", productNum=" + productNum +
", productMoney=" + productMoney +
", orderId=" + orderId +
'}';
}
public Integer getOrderDetailsId() {
return orderDetailsId;
}
public void setOrderDetailsId(Integer orderDetailsId) {
this.orderDetailsId = orderDetailsId;
}
public String getProductName() {
return productName;
}
public void setProductName(String productName) {
this.productName = productName;
}
public Integer getProductNum() {
return productNum;
}
public void setProductNum(Integer productNum) {
this.productNum = productNum;
}
public Double getProductMoney() {
return productMoney;
}
public void setProductMoney(Double productMoney) {
this.productMoney = productMoney;
}
public Integer getOrderId() {
return orderId;
}
public void setOrderId(Integer orderId) {
this.orderId = orderId;
}
}
|
2303_76674713/shopping
|
src/main/java/entiy/OrderDetails.java
|
Java
|
unknown
| 2,054
|
package entiy;
public class Product {
private String productId;
private String productName;
private String productPic;
private String productPrice;
private String productDescribe;
private String categoryId;
private String productStatus;
public Product(String productId, String productName, String productPic, String productPrice, String productDescribe, String categoryId, String productStatus) {
this.productId = productId;
this.productName = productName;
this.productPic = productPic;
this.productPrice = productPrice;
this.productDescribe = productDescribe;
this.categoryId = categoryId;
this.productStatus = productStatus;
}
public Product() {
}
public String getProductId() {
return productId;
}
public void setProductId(String productId) {
this.productId = productId;
}
public String getProductName() {
return productName;
}
public void setProductName(String productName) {
this.productName = productName;
}
public String getProductPic() {
return productPic;
}
public void setProductPic(String productPic) {
this.productPic = productPic;
}
public String getProductPrice() {
return productPrice;
}
public void setProductPrice(String productPrice) {
this.productPrice = productPrice;
}
public String getProductDescribe() {
return productDescribe;
}
public void setProductDescribe(String productDescribe) {
this.productDescribe = productDescribe;
}
public String getCategoryId() {
return categoryId;
}
public void setCategoryId(String categoryId) {
this.categoryId = categoryId;
}
public String getProductStatus() {
return productStatus;
}
public void setProductStatus(String productStatus) {
this.productStatus = productStatus;
}
@Override
public String toString() {
return "Product{" +
"productId=" + productId +
", productName='" + productName + '\'' +
", productPic='" + productPic + '\'' +
", productPrice=" + productPrice +
", productDescribe='" + productDescribe + '\'' +
", categoryId=" + categoryId +
", productStatus='" + productStatus + '\'' +
'}';
}
}
|
2303_76674713/shopping
|
src/main/java/entiy/Product.java
|
Java
|
unknown
| 2,342
|
package entiy;
public class TAddress {
private long addressId;
private String addressProvince;
private String addressCity;
private String addressDistrict;
private String addressDescribe;
private long userId;
public long getAddressId() {
return addressId;
}
public void setAddressId(long addressId) {
this.addressId = addressId;
}
public String getAddressProvince() {
return addressProvince;
}
public void setAddressProvince(String addressProvince) {
this.addressProvince = addressProvince;
}
public String getAddressCity() {
return addressCity;
}
public void setAddressCity(String addressCity) {
this.addressCity = addressCity;
}
public String getAddressDistrict() {
return addressDistrict;
}
public void setAddressDistrict(String addressDistrict) {
this.addressDistrict = addressDistrict;
}
public String getAddressDescribe() {
return addressDescribe;
}
public void setAddressDescribe(String addressDescribe) {
this.addressDescribe = addressDescribe;
}
public long getUserId() {
return userId;
}
public void setUserId(long userId) {
this.userId = userId;
}
@Override
public String toString() {
return "TAddress{" +
"addressId=" + addressId +
", addressProvince='" + addressProvince + '\'' +
", addressCity='" + addressCity + '\'' +
", addressDistrict='" + addressDistrict + '\'' +
", addressDescribe='" + addressDescribe + '\'' +
", userId=" + userId +
'}';
}
}
|
2303_76674713/shopping
|
src/main/java/entiy/TAddress.java
|
Java
|
unknown
| 1,588
|
package entiy;
public class TCategory {
private String categoryId;
private String categoryName;
public String getCategoryId() {
return categoryId;
}
public void setCategoryId(String categoryId) {
this.categoryId = categoryId;
}
public String getCategoryName() {
return categoryName;
}
public void setCategoryName(String categoryName) {
this.categoryName = categoryName;
}
@Override
public String toString() {
return "TCategory{" +
"categoryId=" + categoryId +
", categoryName='" + categoryName + '\'' +
'}';
}
}
|
2303_76674713/shopping
|
src/main/java/entiy/TCategory.java
|
Java
|
unknown
| 605
|
package entiy;
public class TOrderDetails {
private long orderDetailsId;
private String productName;
private long productNum;
private double productMoney;
private long orderId;
public long getOrderDetailsId() {
return orderDetailsId;
}
public void setOrderDetailsId(long orderDetailsId) {
this.orderDetailsId = orderDetailsId;
}
public String getProductName() {
return productName;
}
public void setProductName(String productName) {
this.productName = productName;
}
public long getProductNum() {
return productNum;
}
public void setProductNum(long productNum) {
this.productNum = productNum;
}
public double getProductMoney() {
return productMoney;
}
public void setProductMoney(double productMoney) {
this.productMoney = productMoney;
}
public long getOrderId() {
return orderId;
}
public void setOrderId(long orderId) {
this.orderId = orderId;
}
}
|
2303_76674713/shopping
|
src/main/java/entiy/TOrderDetails.java
|
Java
|
unknown
| 960
|
package entiy;
public class TUser {
private long userId;
private String userTel;
private String userPwd;
private String userName;
private String userSex;
private java.sql.Timestamp addTime;
private String userStatus;
private String userRole;
public long getUserId() {
return userId;
}
public void setUserId(long userId) {
this.userId = userId;
}
public String getUserTel() {
return userTel;
}
public void setUserTel(String userTel) {
this.userTel = userTel;
}
public String getUserPwd() {
return userPwd;
}
public void setUserPwd(String userPwd) {
this.userPwd = userPwd;
}
public String getUserName() {
return userName;
}
public void setUserName(String userName) {
this.userName = userName;
}
public String getUserSex() {
return userSex;
}
public void setUserSex(String userSex) {
this.userSex = userSex;
}
public java.sql.Timestamp getAddTime() {
return addTime;
}
public void setAddTime(java.sql.Timestamp addTime) {
this.addTime = addTime;
}
public String getUserStatus() {
return userStatus;
}
public void setUserStatus(String userStatus) {
this.userStatus = userStatus;
}
public String getUserRole() {
return userRole;
}
public void setUserRole(String userRole) {
this.userRole = userRole;
}
@Override
public String toString() {
return "TUser{" +
"userId=" + userId +
", userTel='" + userTel + '\'' +
", userPwd='" + userPwd + '\'' +
", userName='" + userName + '\'' +
", userSex='" + userSex + '\'' +
", addTime=" + addTime +
", userStatus='" + userStatus + '\'' +
", userRole='" + userRole + '\'' +
'}';
}
}
|
2303_76674713/shopping
|
src/main/java/entiy/TUser.java
|
Java
|
unknown
| 1,806
|
package entiy;
public class User {
private Integer userId;
private String userTel;
private String userPwd;
private String userName;
private String userSex;
private String addTime;
private String addTimeEnd;
private String userStatus;
private String userRole;
public User() {
}
public User(Integer userId, String userTel, String userPwd, String userName, String userSex, String addTime, String userStatus, String userRole) {
this.userId = userId;
this.userTel = userTel;
this.userPwd = userPwd;
this.userName = userName;
this.userSex = userSex;
this.addTime = addTime;
this.userStatus = userStatus;
this.userRole = userRole;
}
public User(Integer userId, String userTel, String userPwd, String userName, String userSex, String addTime, String addTimeEnd, String userStatus, String userRole) {
this.userId = userId;
this.userTel = userTel;
this.userPwd = userPwd;
this.userName = userName;
this.userSex = userSex;
this.addTime = addTime;
this.addTimeEnd = addTimeEnd;
this.userStatus = userStatus;
this.userRole = userRole;
}
public Integer getUserId() {
return userId;
}
public void setUserId(Integer userId) {
this.userId = userId;
}
public String getUserTel() {
return userTel;
}
public void setUserTel(String userTel) {
this.userTel = userTel;
}
public String getUserPwd() {
return userPwd;
}
public void setUserPwd(String userPwd) {
this.userPwd = userPwd;
}
public String getUserName() {
return userName;
}
public void setUserName(String userName) {
this.userName = userName;
}
public String getUserSex() {
return userSex;
}
public void setUserSex(String userSex) {
this.userSex = userSex;
}
public String getAddTime() {
return addTime;
}
public void setAddTime(String addTime) {
this.addTime = addTime;
}
public String getAddTimeEnd() {
return addTimeEnd;
}
public void setAddTimeEnd(String addTimeEnd) {
this.addTimeEnd = addTimeEnd;
}
public String getUserStatus() {
return userStatus;
}
public void setUserStatus(String userStatus) {
this.userStatus = userStatus;
}
public String getUserRole() {
return userRole;
}
public void setUserRole(String userRole) {
this.userRole = userRole;
}
@Override
public String toString() {
return "User{" +
"userId=" + userId +
", userTel='" + userTel + '\'' +
", userPwd='" + userPwd + '\'' +
", userName='" + userName + '\'' +
", userSex='" + userSex + '\'' +
", addTime='" + addTime + '\'' +
", addTimeEnd='" + addTimeEnd + '\'' +
", userStatus='" + userStatus + '\'' +
", userRole='" + userRole + '\'' +
'}';
}
}
|
2303_76674713/shopping
|
src/main/java/entiy/User.java
|
Java
|
unknown
| 3,146
|
package service;
import entiy.Cart;
import entiy.CartProduct;
import java.util.List;
public interface CartService {
/**
     * Add an item to the cart
*/
int addCart(Cart cart);
/**
     * Get all cart entries for the current user
* @param cartProduct
* @return
*/
List<CartProduct> getCart(CartProduct cartProduct);
/**
     * Increase or decrease the quantity of a cart entry
*/
int addAndred(String num ,String cid);
/**
     * Empty the cart
*/
int cartDel(String uid);
}
|
2303_76674713/shopping
|
src/main/java/service/CartService.java
|
Java
|
unknown
| 513
|
package service;
import entiy.Order;
import entiy.OrderDetails;
import java.util.List;
public interface OrderDetailsService {
void addOrderDetails(List<OrderDetails> OrderDetails);
List<Order> selectOrderAndDetails(Order order, int nowPage, int pageSize);
}
|
2303_76674713/shopping
|
src/main/java/service/OrderDetailsService.java
|
Java
|
unknown
| 272
|
package service;
import entiy.Order;
import java.util.List;
public interface OrderService {
/**
     * Add an order and return the auto-generated primary key
* @param order
* @return
*/
int addOrder(Order order);
/**
     * Get all order records
* @param Order
* @return
*/
List<Order> OrderAll(Order Order,int nowPage,int pageSize);
}
|
2303_76674713/shopping
|
src/main/java/service/OrderService.java
|
Java
|
unknown
| 376
|
package service;
import entiy.Product;
import java.util.List;
public interface ProductService {
    // Get all products
List<Product> selectProduct();
    // Insert or update a product
int save(Product product);
/**
     * Query by conditions, with pagination
*/
List<Product> All(Product product, int nowPage, int pageSize);
/**
     * Delete
* @param id
*/
int del(String id);
/**
     * Update product status
* @param type
* @param id
* @return
*/
int updateproductStatus(String type,String id);
}
|
2303_76674713/shopping
|
src/main/java/service/ProductService.java
|
Java
|
unknown
| 556
|
package service;
import entiy.TAddress;
import entiy.TUser;
import java.util.List;
public interface TAddressService {
/**
     * List a user's addresses, with pagination
* @param user
* @return
*/
List<TAddress> queryAddress(TUser user,int nowPage, int pageSize);
/**
     * Add a user address
* @return
*/
int add(String addressProvince,String addressCity,String addressDistrict,String addressDescribe,int id);
/**
     * Delete a user address
* @return
*/
int del(String id);
/**
     * Update a user address
* @return
*/
int update(String addressProvince,String addressCity,String addressDistrict,String addressDescribe,String id);
/**
     * Pagination (commented out)
*/
// List<TAddress> All(TAddress tAddress, int nowPage, int pageSize);
}
|
2303_76674713/shopping
|
src/main/java/service/TAddressService.java
|
Java
|
unknown
| 829
|
package service;
import entiy.TCategory;
import java.util.List;
public interface TCategoryService {
/**
     * List all categories, with pagination
* @param
* @return
*/
List<TCategory> queryAddress(TCategory tcategory, int nowPage, int pageSize);
/**
     * Add a category
* @return
*/
int add(String name, String id);
/**
     * Delete a category
* @return
* @param id
*/
int del(String id);
/**
     * Update a category
* @return
*/
int update(String name, String id);
}
|
2303_76674713/shopping
|
src/main/java/service/TCategoryService.java
|
Java
|
unknown
| 535
|
package service;
import entiy.TUser;
import entiy.User;
import java.util.List;
public interface TUserService {
/**
*
     * Query all TUser records
*/
List<TUser> selectAll();
/**
     * User login
* @return
*/
TUser login(TUser user);
/**
     * User registration
* @return
*/
int register(TUser user);
/**
     * Update name/sex
*/
int updateUserNameandSex(TUser user);
/**
     * Update password
*/
int updateUserPassword(TUser tUser);
/**
     * Query by conditions, with pagination
*/
List<User> All(User user, int nowPage, int pageSize);
/**
     * Update user status
*/
int updateUserStatus(String type,String id);
}
|
2303_76674713/shopping
|
src/main/java/service/TUserService.java
|
Java
|
unknown
| 712
|