Python error

Why can I print file_rdd, but not file_str_rdd?

from pyspark import SparkConf, SparkContext
import os, json

os.environ['PYSPARK_PYTHON'] = "E:/python/python3/python.exe"
os.environ['HADOOP_HOME'] = "e:/dev/hadoop-3.0.0"
conf = SparkConf().setMaster("local[*]").setAppName("test.spark")
sc = SparkContext(conf=conf)
# Read the file and convert it into an RDD
file_rdd = sc.textFile("f:/search_log.txt")
# print(file_rdd.collect())
# TODO Requirement 1: Top 3 busiest search time slots (hour granularity)
# 1.1 Extract all the timestamps and convert them to hours
# 1.2 Convert each hour into a (hour, 1) tuple
# 1.3 Group by key and aggregate the values
# 1.4 Sort in descending order
# 1.5 Take the top 3
file_str_rdd = file_rdd.map(lambda x: x.split("\t")). \
    map(lambda x: x[0][:2]). \
    flatMap(lambda x: (x, 1)). \
    reduceByKey(lambda a, b: a + b).sortBy(lambda x: x[1], ascending=False, numPartitions=1). \
    take(3)
print(file_str_rdd)
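
A hedged guess at the cause, based on the traceback below: flatMap(lambda x: (x, 1)) flattens each (hour, 1) pair into two separate elements, so reduceByKey ends up iterating over bare strings and ints instead of key-value pairs. A minimal sketch of what step 1.2 presumably intended, keeping the same variable name and swapping flatMap for map (an assumption, not verified against the actual search_log.txt format):

# Sketch: same pipeline, but map keeps each element as a (hour, 1) pair
# that reduceByKey can unpack into (key, value).
file_str_rdd = file_rdd.map(lambda x: x.split("\t")). \
    map(lambda x: x[0][:2]). \
    map(lambda x: (x, 1)). \
    reduceByKey(lambda a, b: a + b). \
    sortBy(lambda x: x[1], ascending=False, numPartitions=1). \
    take(3)
print(file_str_rdd)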




Below is the error output when running the file_str_rdd code:

E:\pycharm\python-learn\venv\Scripts\python.exe E:\pycharm\python-learn\pyspark\13_综合案例.py 
Setting default log level to "WARN".
To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).
23/05/30 13:40:17 ERROR Executor: Exception in task 1.0 in stage 0.0 (TID 1)
org.apache.spark.api.python.PythonException: Traceback (most recent call last):
  File "E:\python\python3\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\worker.py", line 830, in main
  File "E:\python\python3\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\worker.py", line 820, in process
  File "E:\python\python3\lib\site-packages\pyspark\rdd.py", line 5405, in pipeline_func
    return func(split, prev_func(split, iterator))
  File "E:\python\python3\lib\site-packages\pyspark\rdd.py", line 5405, in pipeline_func
    return func(split, prev_func(split, iterator))
  File "E:\python\python3\lib\site-packages\pyspark\rdd.py", line 828, in func
    return f(iterator)
  File "E:\python\python3\lib\site-packages\pyspark\rdd.py", line 3964, in combineLocally
    merger.mergeValues(iterator)
  File "E:\python\python3\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\shuffle.py", line 256, in mergeValues
    for k, v in iterator:
TypeError: cannot unpack non-iterable int object

	at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:561)
	at org.apache.spark.api.python.PythonRunner$$anon$3.read(PythonRunner.scala:767)
	at org.apache.spark.api.python.PythonRunner$$anon$3.read(PythonRunner.scala:749)
	at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:514)
	at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
	at scala.collection.Iterator$GroupedIterator.fill(Iterator.scala:1211)
	at scala.collection.Iterator$GroupedIterator.hasNext(Iterator.scala:1217)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:460)
	at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:140)
	at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:101)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:161)
	at org.apache.spark.scheduler.Task.run(Task.scala:139)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:554)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1529)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:557)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.base/java.lang.Thread.run(Thread.java:833)
23/05/30 13:40:17 WARN TaskSetManager: Lost task 1.0 in stage 0.0 (TID 1) (gsh executor driver): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
  File "E:\python\python3\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\worker.py", line 830, in main
  File "E:\python\python3\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\worker.py", line 820, in process
  File "E:\python\python3\lib\site-packages\pyspark\rdd.py", line 5405, in pipeline_func
    return func(split, prev_func(split, iterator))
  File "E:\python\python3\lib\site-packages\pyspark\rdd.py", line 5405, in pipeline_func
    return func(split, prev_func(split, iterator))
  File "E:\python\python3\lib\site-packages\pyspark\rdd.py", line 828, in func
    return f(iterator)
  File "E:\python\python3\lib\site-packages\pyspark\rdd.py", line 3964, in combineLocally
    merger.mergeValues(iterator)
  File "E:\python\python3\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\shuffle.py", line 256, in mergeValues
    for k, v in iterator:
TypeError: cannot unpack non-iterable int object

	at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:561)
	at org.apache.spark.api.python.PythonRunner$$anon$3.read(PythonRunner.scala:767)
	at org.apache.spark.api.python.PythonRunner$$anon$3.read(PythonRunner.scala:749)
	at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:514)
	at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
	at scala.collection.Iterator$GroupedIterator.fill(Iterator.scala:1211)
	at scala.collection.Iterator$GroupedIterator.hasNext(Iterator.scala:1217)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:460)
	at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:140)
	at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:101)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:161)
	at org.apache.spark.scheduler.Task.run(Task.scala:139)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:554)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1529)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:557)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.base/java.lang.Thread.run(Thread.java:833)

23/05/30 13:40:17 ERROR TaskSetManager: Task 1 in stage 0.0 failed 1 times; aborting job
Traceback (most recent call last):
  File "E:\pycharm\python-learn\pyspark\13_综合案例.py", line 24, in <module>
    take(3)
  File "E:\python\python3\lib\site-packages\pyspark\rdd.py", line 2836, in take
    res = self.context.runJob(self, takeUpToNumLeft, p)
  File "E:\python\python3\lib\site-packages\pyspark\context.py", line 2319, in runJob
    sock_info = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, partitions)
  File "E:\python\python3\lib\site-packages\py4j\java_gateway.py", line 1322, in __call__
    return_value = get_return_value(
  File "E:\python\python3\lib\site-packages\py4j\protocol.py", line 326, in get_return_value
    raise Py4JJavaError(
: An error occurred while calling z:org.apache.spark.api.python.PythonRDD.runJob.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 1 in stage 0.0 failed 1 times, most recent failure: Lost task 1.0 in stage 0.0 (TID 1) (gsh executor driver): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
  File "E:\python\python3\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\worker.py", line 830, in main
  File "E:\python\python3\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\worker.py", line 820, in process
  File "E:\python\python3\lib\site-packages\pyspark\rdd.py", line 5405, in pipeline_func
    return func(split, prev_func(split, iterator))
  File "E:\python\python3\lib\site-packages\pyspark\rdd.py", line 5405, in pipeline_func
    return func(split, prev_func(split, iterator))
  File "E:\python\python3\lib\site-packages\pyspark\rdd.py", line 828, in func
    return f(iterator)
  File "E:\python\python3\lib\site-packages\pyspark\rdd.py", line 3964, in combineLocally
    merger.mergeValues(iterator)
  File "E:\python\python3\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\shuffle.py", line 256, in mergeValues
    for k, v in iterator:
TypeError: cannot unpack non-iterable int object

	at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:561)
	at org.apache.spark.api.python.PythonRunner$$anon$3.read(PythonRunner.scala:767)
	at org.apache.spark.api.python.PythonRunner$$anon$3.read(PythonRunner.scala:749)
	at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:514)
	at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
	at scala.collection.Iterator$GroupedIterator.fill(Iterator.scala:1211)
	at scala.collection.Iterator$GroupedIterator.hasNext(Iterator.scala:1217)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:460)
	at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:140)
	at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:101)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:161)
	at org.apache.spark.scheduler.Task.run(Task.scala:139)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:554)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1529)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:557)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.base/java.lang.Thread.run(Thread.java:833)

Driver stacktrace:
	at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2785)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2721)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2720)
	at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
	at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2720)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1206)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1206)
	at scala.Option.foreach(Option.scala:407)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1206)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2984)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2923)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2912)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:971)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2263)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2284)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2303)
	at org.apache.spark.api.python.PythonRDD$.runJob(PythonRDD.scala:179)
	at org.apache.spark.api.python.PythonRDD.runJob(PythonRDD.scala)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
	at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.base/java.lang.reflect.Method.invoke(Method.java:568)
	at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
	at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:374)
	at py4j.Gateway.invoke(Gateway.java:282)
	at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
	at py4j.commands.CallCommand.execute(CallCommand.java:79)
	at py4j.ClientServerConnection.waitForCommands(ClientServerConnection.java:182)
	at py4j.ClientServerConnection.run(ClientServerConnection.java:106)
	at java.base/java.lang.Thread.run(Thread.java:833)
Caused by: org.apache.spark.api.python.PythonException: Traceback (most recent call last):
  File "E:\python\python3\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\worker.py", line 830, in main
  File "E:\python\python3\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\worker.py", line 820, in process
  File "E:\python\python3\lib\site-packages\pyspark\rdd.py", line 5405, in pipeline_func
    return func(split, prev_func(split, iterator))
  File "E:\python\python3\lib\site-packages\pyspark\rdd.py", line 5405, in pipeline_func
    return func(split, prev_func(split, iterator))
  File "E:\python\python3\lib\site-packages\pyspark\rdd.py", line 828, in func
    return f(iterator)
  File "E:\python\python3\lib\site-packages\pyspark\rdd.py", line 3964, in combineLocally
    merger.mergeValues(iterator)
  File "E:\python\python3\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\shuffle.py", line 256, in mergeValues
    for k, v in iterator:
TypeError: cannot unpack non-iterable int object

	at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:561)
	at org.apache.spark.api.python.PythonRunner$$anon$3.read(PythonRunner.scala:767)
	at org.apache.spark.api.python.PythonRunner$$anon$3.read(PythonRunner.scala:749)
	at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:514)
	at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
	at scala.collection.Iterator$GroupedIterator.fill(Iterator.scala:1211)
	at scala.collection.Iterator$GroupedIterator.hasNext(Iterator.scala:1217)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:460)
	at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:140)
	at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:101)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:161)
	at org.apache.spark.scheduler.Task.run(Task.scala:139)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:554)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1529)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:557)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	... 1 more


Process finished with exit code 1
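
For context on the TypeError above, a small illustration (my own example, not from the original post) of how map and flatMap differ on pairs:

# Illustrative sketch: flatMap flattens each ("10", 1) pair into two
# separate elements, so reduceByKey's merger hits a bare int when it
# runs "for k, v in iterator" and raises the unpack error shown above.
hours = sc.parallelize(["10", "11", "10"])
print(hours.map(lambda h: (h, 1)).collect())      # [('10', 1), ('11', 1), ('10', 1)]
print(hours.flatMap(lambda h: (h, 1)).collect())  # ['10', 1, '11', 1, '10', 1]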
