💖💖作者:计算机毕业设计江挽
💙💙个人简介:曾长期从事计算机专业培训教学,本人也热爱上课教学,语言擅长Java、微信小程序、Python、Golang、安卓Android等,开发项目包括大数据、深度学习、网站、小程序、安卓、算法。平常会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我!
💛💛想说的话:感谢大家的关注与支持!
💜💜
网站实战项目
安卓/小程序实战项目
大数据实战项目
深度学习实战项目
目录
基于大数据的校园霸凌数据分析系统介绍
校园霸凌数据分析系统是一套基于大数据技术构建的综合性数据分析平台,采用Hadoop分布式存储架构和Spark内存计算引擎作为核心技术底座。系统通过Django框架搭建稳定的后端服务,配合Vue+ElementUI构建现代化的前端交互界面,实现对校园霸凌现象的多维度数据分析和可视化展示。系统涵盖用户管理、霸凌状态分析、霸凌影响分析、人口统计分析、体重与霸凌关联分析等八大功能模块,每个模块都经过精心设计以满足不同角度的数据分析需求。通过集成Echarts图表库,系统能够将复杂的统计数据转化为直观的可视化图表,帮助用户快速理解数据背后的规律和趋势。整个系统采用前后端分离的设计模式,保证了良好的可扩展性和维护性,同时利用Spark SQL和Pandas、NumPy等数据处理工具,实现了高效的大数据处理能力,为校园安全管理和决策支持提供了可靠的技术手段。
基于大数据的校园霸凌数据分析系统演示视频
基于大数据的校园霸凌数据分析系统 | 同样用Hadoop+Spark做毕设,为什么校园霸凌数据分析系统通过率这么高?
基于大数据的校园霸凌数据分析系统演示图片








基于大数据的校园霸凌数据分析系统代码展示
# Third-party imports: PySpark for distributed analytics, pandas/NumPy for
# in-memory statistics, Django for the HTTP layer, json for request parsing.
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, count, avg, sum, when, desc
import pandas as pd
import numpy as np
from django.http import JsonResponse
from django.views import View
import json
# Module-level shared SparkSession, reused by every view below.  Adaptive
# query execution is enabled so Spark can re-optimize shuffles at runtime.
spark = SparkSession.builder.appName("CampusBullyingAnalysis").config("spark.sql.adaptive.enabled", "true").getOrCreate()
class BullyingStatusAnalysis(View):
    """Aggregate bullying-status statistics from the ``bullying_records`` table.

    ``post`` returns a JSON payload with: the per-status incident count and
    mean severity, the monthly incident trend per status, a three-level
    severity breakdown, a correlation matrix over severity / duration /
    psychological impact, the number of repeat victims, and the average
    intervention outcome score.
    """

    def post(self, request):
        # The body is parsed (and invalid JSON still raises, as before),
        # but no filter from it is currently applied to the query.
        json.loads(request.body)
        # SECURITY NOTE(review): JDBC credentials are hard-coded; they
        # should come from Django settings / environment variables.
        df = (spark.read.format("jdbc")
              .option("url", "jdbc:mysql://localhost:3306/campus_db")
              .option("dbtable", "bullying_records")
              .option("user", "root")
              .option("password", "123456")
              .load())
        # Incident count and mean severity per bullying status.
        status_counts = (df.groupBy("bullying_status")
                         .agg(count("*").alias("count"),
                              avg("severity_score").alias("avg_severity"))
                         .orderBy(desc("count")))
        # Incident counts per month, broken down by status.
        monthly_trend = (df.groupBy("month", "bullying_status")
                         .agg(count("*").alias("incident_count"))
                         .orderBy("month"))
        # Bucket raw severity scores into three human-readable levels.
        severity_distribution = (df.select(
                when(col("severity_score") < 3, "轻微")
                .when(col("severity_score") < 6, "中等")
                .otherwise("严重").alias("severity_level"))
            .groupBy("severity_level").count())
        # Pairwise correlation computed in pandas after collecting only
        # the three numeric columns involved.
        correlation_data = df.select("severity_score", "duration_minutes",
                                     "psychological_impact").toPandas()
        correlation_matrix = correlation_data.corr().to_dict()
        # Number of victims involved in more than one incident.
        repeat_incidents = (df.groupBy("victim_id")
                            .agg(count("*").alias("incident_count"))
                            .filter(col("incident_count") > 1)
                            .count())
        # Mean outcome score where an intervention was applied; AVG over an
        # empty/all-NULL set yields None, guarded below.
        intervention_success = (df.filter(col("intervention_applied") == True)
                                .agg(avg("outcome_score").alias("success_rate"))
                                .collect()[0]["success_rate"])
        result_data = {
            "status_distribution": [
                {"status": row["bullying_status"],
                 "count": row["count"],
                 # avg() is None when every severity_score is NULL for a
                 # status group; round(None) would raise TypeError.
                 "severity": round(row["avg_severity"] or 0, 2)}
                for row in status_counts.collect()],
            "monthly_trend": [
                {"month": row["month"],
                 "status": row["bullying_status"],
                 "count": row["incident_count"]}
                for row in monthly_trend.collect()],
            "severity_levels": [
                {"level": row["severity_level"], "count": row["count"]}
                for row in severity_distribution.collect()],
            "correlation_matrix": correlation_matrix,
            "repeat_rate": repeat_incidents,
            "intervention_rate": round(intervention_success or 0, 2),
        }
        return JsonResponse(result_data, safe=False)
class BullyingImpactAnalysis(View):
    """Summarize the consequences of bullying from the ``impact_records`` table.

    ``post`` returns a JSON payload with: academic-performance impact per
    change category, counts of high-risk psychological scores, behavioral
    change statistics, average recovery time and friend loss, and the
    effectiveness ranking of support types.
    """

    def post(self, request):
        # Parsed for interface compatibility (invalid JSON still raises);
        # no filter from the body is applied yet.
        json.loads(request.body)
        # SECURITY NOTE(review): hard-coded JDBC credentials — move to
        # Django settings / environment variables.
        df = (spark.read.format("jdbc")
              .option("url", "jdbc:mysql://localhost:3306/campus_db")
              .option("dbtable", "impact_records")
              .option("user", "root")
              .option("password", "123456")
              .load())
        # Count and mean grade drop per academic-performance change category.
        academic_impact = (df.groupBy("academic_performance_change")
                           .agg(count("*").alias("count"),
                                avg("grade_drop_percentage").alias("avg_drop"))
                           .orderBy(desc("count")))
        # Threshold-based counts of concerning psychological scores,
        # computed in pandas on the four collected columns.
        psychological_impact = df.select(
            "anxiety_level", "depression_score",
            "self_esteem_score", "social_isolation_score").toPandas()
        psychological_stats = {
            "anxiety_high": len(
                psychological_impact[psychological_impact["anxiety_level"] > 7]),
            "depression_moderate": len(
                psychological_impact[psychological_impact["depression_score"] > 5]),
            "low_esteem": len(
                psychological_impact[psychological_impact["self_esteem_score"] < 3]),
        }
        # Count and mean intensity per behavioral-change type.
        behavioral_changes = (df.groupBy("behavioral_change_type")
                              .agg(count("*").alias("change_count"),
                                   avg("change_intensity").alias("avg_intensity")))
        # Mean drop in friend count before vs. after the incident.
        social_network_analysis = df.select(
            "friends_count_before", "friends_count_after").toPandas()
        social_network_analysis["friend_loss"] = (
            social_network_analysis["friends_count_before"]
            - social_network_analysis["friends_count_after"])
        avg_friend_loss = social_network_analysis["friend_loss"].mean()
        # Mean recovery time among recovered cases; None when no rows match.
        recovery_timeline = (df.filter(col("recovery_status") == "recovered")
                             .select(avg("recovery_time_months")
                                     .alias("avg_recovery_time"))
                             .collect()[0]["avg_recovery_time"])
        # Support types ranked by mean effectiveness score.
        support_effectiveness = (df.groupBy("support_type")
                                 .agg(count("*").alias("support_count"),
                                      avg("effectiveness_score")
                                      .alias("avg_effectiveness"))
                                 .orderBy(desc("avg_effectiveness")))
        result_data = {
            "academic_impact": [
                {"change": row["academic_performance_change"],
                 "count": row["count"],
                 # Guard: AVG over all-NULL group values is None.
                 "avg_drop": round(row["avg_drop"] or 0, 2)}
                for row in academic_impact.collect()],
            "psychological_stats": psychological_stats,
            "behavioral_changes": [
                {"type": row["behavioral_change_type"],
                 "count": row["change_count"],
                 "intensity": round(row["avg_intensity"] or 0, 2)}
                for row in behavioral_changes.collect()],
            "recovery_data": {
                "avg_time": round(recovery_timeline or 0, 1),
                "avg_friend_loss": round(avg_friend_loss, 1),
            },
            "support_effectiveness": [
                {"type": row["support_type"],
                 "count": row["support_count"],
                 "effectiveness": round(row["avg_effectiveness"] or 0, 2)}
                for row in support_effectiveness.collect()],
        }
        return JsonResponse(result_data, safe=False)
class DemographicStatisticalAnalysis(View):
    """Break down bullying involvement by demographics (``demographic_data``).

    ``post`` returns a JSON payload with distributions over age group,
    gender, socioeconomic status, family structure, and residential area.
    """

    def post(self, request):
        # Parsed for interface compatibility (invalid JSON still raises);
        # the body's content is not used as a filter yet.
        json.loads(request.body)
        # SECURITY NOTE(review): hard-coded JDBC credentials — move to
        # Django settings / environment variables.
        df = (spark.read.format("jdbc")
              .option("url", "jdbc:mysql://localhost:3306/campus_db")
              .option("dbtable", "demographic_data")
              .option("user", "root")
              .option("password", "123456")
              .load())
        # Per age group: total students plus victim / perpetrator counts.
        age_distribution = (df.groupBy("age_group")
                            .agg(count("*").alias("total_count"),
                                 count(when(col("is_victim"), True))
                                 .alias("victim_count"),
                                 count(when(col("is_perpetrator"), True))
                                 .alias("perp_count")))
        # Per gender: totals, mean involvement rate, bystander count.
        gender_analysis = (df.groupBy("gender")
                           .agg(count("*").alias("gender_total"),
                                avg("incident_involvement_rate")
                                .alias("avg_involvement"),
                                sum(when(col("role") == "bystander", 1)
                                    .otherwise(0)).alias("bystander_count")))
        # Per socioeconomic status: counts, mean bullying frequency, and
        # how many students used support services.
        socioeconomic_analysis = (df.groupBy("socioeconomic_status")
                                  .agg(count("*").alias("ses_count"),
                                       avg("bullying_frequency")
                                       .alias("avg_frequency"),
                                       count(when(col("support_services_used") > 0,
                                                  True)).alias("support_users")))
        # Per family structure: counts and mean support / involvement scores.
        family_structure_impact = (df.groupBy("family_structure")
                                   .agg(count("*").alias("family_count"),
                                        avg("family_support_score")
                                        .alias("avg_support"),
                                        avg("parental_involvement")
                                        .alias("avg_involvement")))
        # Per residential area: counts, mean community safety, and how many
        # students report transportation issues.
        geographic_distribution = (df.groupBy("residential_area")
                                   .agg(count("*").alias("area_count"),
                                        avg("community_safety_score")
                                        .alias("avg_safety"),
                                        count(when(col("transportation_issues"),
                                                   True))
                                        .alias("transport_issues")))
        result_data = {
            "age_distribution": [
                {"age_group": row["age_group"],
                 "total": row["total_count"],
                 "victims": row["victim_count"],
                 "perpetrators": row["perp_count"]}
                for row in age_distribution.collect()],
            "gender_breakdown": [
                {"gender": row["gender"],
                 "total": row["gender_total"],
                 # Guard: AVG over all-NULL group values is None.
                 "involvement": round(row["avg_involvement"] or 0, 2),
                 "bystanders": row["bystander_count"]}
                for row in gender_analysis.collect()],
            "socioeconomic_data": [
                {"status": row["socioeconomic_status"],
                 "count": row["ses_count"],
                 "frequency": round(row["avg_frequency"] or 0, 2)}
                for row in socioeconomic_analysis.collect()],
            "family_structure": [
                {"structure": row["family_structure"],
                 "count": row["family_count"],
                 "support": round(row["avg_support"] or 0, 2)}
                for row in family_structure_impact.collect()],
            "geographic_info": [
                {"area": row["residential_area"],
                 "count": row["area_count"],
                 "safety": round(row["avg_safety"] or 0, 2)}
                for row in geographic_distribution.collect()],
        }
        return JsonResponse(result_data, safe=False)
基于大数据的校园霸凌数据分析系统文档展示

💖💖作者:计算机毕业设计江挽
💙💙个人简介:曾长期从事计算机专业培训教学,本人也热爱上课教学,语言擅长Java、微信小程序、Python、Golang、安卓Android等,开发项目包括大数据、深度学习、网站、小程序、安卓、算法。平常会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我!
💛💛想说的话:感谢大家的关注与支持!
💜💜
网站实战项目
安卓/小程序实战项目
大数据实战项目
深度学习实战项目
大数据助力校园霸凌分析系统

被折叠的评论
为什么被折叠?



