- Java code
package com.example;
import static android.opengl.GLES20.GL_LINEAR;
import static android.opengl.GLES20.GL_TEXTURE_2D;
import static android.opengl.GLES20.GL_TEXTURE_MAG_FILTER;
import static android.opengl.GLES20.GL_TEXTURE_MIN_FILTER;
import static android.opengl.GLES20.glBindTexture;
import static android.opengl.GLES20.glGetUniformLocation;
import static android.opengl.GLES20.glTexParameteri;
import android.annotation.SuppressLint;
import android.ddm.DdmHandleAppName;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.graphics.Canvas;
import android.graphics.GraphicBuffer;
import android.graphics.ImageFormat;
import android.graphics.PixelFormat;
import android.hardware.HardwareBuffer;
import android.hardware.display.DisplayManagerGlobal;
import android.opengl.EGL14;
import android.opengl.EGLConfig;
import android.opengl.EGLContext;
import android.opengl.EGLDisplay;
import android.opengl.EGLSurface;
import android.opengl.GLES20;
import android.opengl.GLES30;
import android.os.Looper;
import android.os.Trace;
import android.util.Slog;
import android.view.Display;
import android.view.DisplayInfo;
import android.view.Surface;
import android.view.SurfaceControl;
import android.view.SurfaceSession;
import java.lang.reflect.Field;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.FloatBuffer;
import java.nio.ShortBuffer;
import javax.microedition.khronos.egl.EGL10;
public class Test {
    static {
        // Native library that provides createExternalTexture(); must be reachable
        // via -Djava.library.path (see the launcher script: /data/local/tmp).
        System.loadLibrary("skottie_android");
    }

    /**
     * Entry point, launched directly through app_process (not an Activity).
     * Creates a raw SurfaceControl layer on the default display and hands the
     * backing Surface to native GL code for drawing.
     *
     * @param args args[0] is the path of the image file to render.
     */
    public static void main(String[] args) {
        // Fix: args[0] was dereferenced unconditionally; fail with a usage
        // message instead of an ArrayIndexOutOfBoundsException.
        if (args.length < 1) {
            System.err.println("Usage: app_process ... com.example.Test <image-path>");
            return;
        }
        DdmHandleAppName.setAppName("com.example.v8", 0);
        android.os.Process.setUid(android.os.Process.ROOT_UID);
        Looper.prepareMainLooper();
        // Size the layer relative to the physical display: 3/4 width, 1/2 height.
        DisplayInfo displayInfo = DisplayManagerGlobal.getInstance().getDisplayInfo(Display.DEFAULT_DISPLAY);
        int width = displayInfo.logicalWidth / 4 * 3;
        int height = displayInfo.logicalHeight / 2;
        Surface surface = new Surface();
        SurfaceSession session = new SurfaceSession();
        SurfaceControl control = new SurfaceControl.Builder(session)
                .setName("test")
                .setBufferSize(width, height)
                .setFormat(PixelFormat.RGBA_8888)
                .setFlags(0)
                .build();
        // Very high Z (300000) so the layer sits above normal app windows.
        SurfaceControl.Transaction t = new SurfaceControl.Transaction();
        t.setLayer(control, 300000);
        t.setPosition(control, 100, 300);
        t.show(control);
        t.apply();
        surface.copyFrom(control);
        GLUtils.draw(args[0], surface);
        // NOTE(review): no matching Trace.traceBegin is visible in this file —
        // confirm GLUtils.draw opens this trace section, otherwise this is unbalanced.
        Trace.traceEnd(Trace.TRACE_TAG_VIEW);
        Looper.loop();
    }

    /**
     * Decodes only the bounds of the image at {@code path}, allocates a YV12
     * GraphicBuffer of that size, and asks native code to decode the planes
     * into it and wrap it in a GL texture.
     *
     * @param path file path of the image to decode
     * @return the GL texture id produced by the native side
     * @throws IllegalArgumentException if the file cannot be decoded
     */
    public static int createTexture(String path) {
        BitmapFactory.Options options = new BitmapFactory.Options();
        options.inJustDecodeBounds = true;
        BitmapFactory.decodeFile(path, options);
        int width = options.outWidth;
        int height = options.outHeight;
        // Fix: decodeFile leaves outWidth/outHeight at -1 on failure, which
        // previously flowed straight into GraphicBuffer.create.
        if (width <= 0 || height <= 0) {
            throw new IllegalArgumentException("Cannot decode bounds of " + path);
        }
        GraphicBuffer graphicBuffer = GraphicBuffer.create(width, height, ImageFormat.YV12,
                GraphicBuffer.USAGE_HW_TEXTURE | GraphicBuffer.USAGE_SW_WRITE_RARELY);
        HardwareBuffer hardwareBuffer = HardwareBuffer.createFromGraphicBuffer(graphicBuffer);
        return createExternalTexture(path, hardwareBuffer, width, height);
    }

    /** Implemented in libskottie_android.so; decodes YUV planes into the buffer and binds it to a texture. */
    private static native int createExternalTexture(String path, HardwareBuffer hardwareBuffer, int width, int height);
}
JNI code (C++)
extern "C"
JNIEXPORT jint JNICALL
Java_com_example_Test_createExternalTexture(JNIEnv *env, jclass clazz, jstring path,
                                            jobject hardware_buffer, jint width, jint height) {
    // Decode the image at |path| into raw YUVA planes with Skia, copy the
    // planes (Y, V, U order for the YV12 buffer) into the AHardwareBuffer,
    // then wrap the buffer in an EGLImage bound to a new GL texture.
    const char *cstr = env->GetStringUTFChars(path, nullptr);
    if (cstr == nullptr) {
        return -1;  // OOM; a Java exception is already pending.
    }
    std::unique_ptr<SkStream> stream(SkStream::MakeFromFile(cstr));
    // Fix: the UTF chars were never released (per-call JNI leak).
    env->ReleaseStringUTFChars(path, cstr);
    if (!stream) {
        return -1;
    }
    std::unique_ptr<SkCodec> codec(SkCodec::MakeFromStream(std::move(stream)));
    if (!codec) {
        return -1;
    }
    static constexpr auto kAllTypes = SkYUVAPixmapInfo::SupportedDataTypes::All();
    SkYUVAPixmapInfo yuvaPixmapInfo;
    if (!codec->queryYUVAInfo(kAllTypes, &yuvaPixmapInfo)) {
        return -1;  // Image has no planar YUV representation.
    }
    auto pixmaps = SkYUVAPixmaps::Allocate(yuvaPixmapInfo);
    if (!codec->getYUVAPlanes(pixmaps)) {
        return -1;
    }
    AHardwareBuffer *buffer = AHardwareBuffer_fromHardwareBuffer(env, hardware_buffer);
    void *data = nullptr;
    if (AHardwareBuffer_lock(buffer,
                             AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN,
                             -1,
                             nullptr,
                             &data) != 0 || data == nullptr) {
        return -1;
    }
    // YV12 plane order is Y, V, U while the codec emits Y, U, V — hence {0, 2, 1}.
    // NOTE(review): this assumes each plane is tightly packed at the buffer's
    // stride; confirm plane rowBytes match the AHardwareBuffer stride.
    size_t copied = 0;
    static constexpr int kOrder[3] = {0, 2, 1};
    // Fix: bound the loop by the size of kOrder — some formats report 4 planes,
    // which previously read past the end of the order table.
    int numPlanes = std::min(pixmaps.numPlanes(), 3);
    for (int i = 0; i < numPlanes; i++) {
        int c = kOrder[i];
        char *dst = static_cast<char *>(data) + copied;
        size_t len = pixmaps.plane(c).computeByteSize();
        memcpy(dst, pixmaps.plane(c).addr(), len);
        copied += len;
    }
    int fence = -1;
    AHardwareBuffer_unlock(buffer, &fence);
    // Wrap the filled buffer in an EGLImage on the current display/context.
    EGLDisplay eglCurrentDisplay = eglGetCurrentDisplay();
    EGLint imageAttrs[] = {EGL_IMAGE_PRESERVED_KHR, EGL_TRUE, EGL_NONE};
    EGLClientBuffer eglBuffer = eglGetNativeClientBufferANDROID(buffer);
    EGLImageKHR image = eglCreateImageKHR(eglCurrentDisplay, EGL_NO_CONTEXT,
                                          EGL_NATIVE_BUFFER_ANDROID, eglBuffer,
                                          imageAttrs);
    if (image == EGL_NO_IMAGE_KHR) {
        return -1;
    }
    GLuint ids[1];
    glGenTextures(1, ids);
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, ids[0]);
    // Bind the EGLImage as the texture's storage (zero-copy).
    glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    return ids[0];
}
#!/system/bin/sh
# Launches com.example.Test via app_process with a JDWP debugger port open
# (server=y, suspend=n) and the native library path pointed at /data/local/tmp.
base=/sdcard/webp
export CLASSPATH=$base/classes.dex
# Fix: "$@" must be quoted so arguments containing spaces survive word splitting.
exec app_process -XjdwpProvider:default -XjdwpOptions:suspend=n,server=y \
    -Djava.library.path=/data/local/tmp "$base/" com.example.Test "$@"
diff --git a/libs/hwui/HardwareBitmapUploader.cpp b/libs/hwui/HardwareBitmapUploader.cpp
index c24cabb287de..36f26caeefa5 100644
--- a/libs/hwui/HardwareBitmapUploader.cpp
+++ b/libs/hwui/HardwareBitmapUploader.cpp
@@ -28,12 +28,17 @@
#include <utils/GLUtils.h>
#include <utils/NdkUtils.h>
#include <utils/Trace.h>
+#include <gl/GrGLInterface.h>
+#include <cstddef>
#include <thread>
+#include "SkYUVAPixmaps.h"
+#include "GrRecordingContext.h"
#include "hwui/Bitmap.h"
#include "renderthread/EglManager.h"
#include "renderthread/VulkanManager.h"
+#include "renderthread/RenderThread.h"
#include "thread/ThreadBase.h"
#include "utils/TimeUtils.h"
@@ -43,6 +48,7 @@ class AHBUploader;
// This helper uploader classes allows us to upload using either EGL or Vulkan using the same
// interface.
static sp<AHBUploader> sUploader = nullptr;
+static sp<AHBUploader> sGpuUploader = nullptr;
struct FormatInfo {
AHardwareBuffer_Format bufferFormat;
@@ -76,6 +82,14 @@ public:
return result;
}
+ sk_sp<SkImage> uploadGpuBitmap(const SkYUVAPixmaps& pixmaps) {
+ ATRACE_CALL();
+ beginUpload();
+ auto image = onUploadGpuBitmap(pixmaps);
+ endUpload();
+ return image;
+ }
+
void postIdleTimeoutCheck() {
mUploadThread->queue().postDelayed(5000_ms, [this](){ this->idleTimeoutCheck(); });
}
@@ -90,6 +104,7 @@ private:
virtual bool onUploadHardwareBitmap(const SkBitmap& bitmap, const FormatInfo& format,
AHardwareBuffer* ahb) = 0;
+ virtual sk_sp<SkImage> onUploadGpuBitmap(const SkYUVAPixmaps& pixmaps) { return nullptr;};
virtual void onBeginUpload() = 0;
bool shouldTimeOutLocked() {
@@ -221,6 +236,57 @@ private:
renderthread::EglManager mEglManager;
};
+class GpuUploader : public AHBUploader {
+private:
+ void onDestroy() override {
+ mEglManager.destroy();
+ }
+ void onIdle() override {
+ mEglManager.destroy();
+ }
+
+ void onBeginUpload() override {
+ if (!mEglManager.hasEglContext()) {
+ mUploadThread->queue().runSync([this]() {
+ auto context = renderthread::RenderThread::getInstance().eglManager().eglContext();
+ this->mEglManager.initialize(context);
+ glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
+ sk_sp<const GrGLInterface> glInterface(GrGLCreateNativeInterface());
+ GrContextOptions options;
+ options.fPreferExternalImagesOverES3 = true;
+ options.fDisableDistanceFieldPaths = true;
+ mGrContext = GrDirectContext::MakeGL(std::move(glInterface), options);
+ });
+
+ this->postIdleTimeoutCheck();
+ }
+ }
+
+
+ EGLDisplay getUploadEglDisplay() {
+ std::lock_guard _lock{mLock};
+ LOG_ALWAYS_FATAL_IF(!mEglManager.hasEglContext(), "Forgot to begin an upload?");
+ return mEglManager.eglDisplay();
+ }
+
+ bool onUploadHardwareBitmap(const SkBitmap& bitmap, const FormatInfo& format,
+ AHardwareBuffer* ahb) override {
+
+ return true;
+ }
+
+ sk_sp<SkImage> onUploadGpuBitmap(const SkYUVAPixmaps& pixmaps) override {
+ sk_sp<SkImage> image;
+ renderthread::RenderThread::getInstance().queue().runSync([&image, &pixmaps] {
+ image = SkImage::MakeFromYUVAPixmaps(renderthread::RenderThread::getInstance().mGrContext.get(), pixmaps);
+ });
+ return image;
+ }
+
+ sk_sp<GrDirectContext> mGrContext;
+ renderthread::EglManager mEglManager;
+};
+
class VkUploader : public AHBUploader {
private:
void onDestroy() override {
@@ -406,6 +472,22 @@ static void createUploader(bool usingGL) {
}
}
+static void createGpuUploader(bool usingGL) {
+ static std::mutex lock;
+ std::lock_guard _lock{lock};
+ if (!sGpuUploader.get() && usingGL) {
+ sGpuUploader = new GpuUploader();
+ }
+}
+
+sk_sp<Bitmap> HardwareBitmapUploader::allocateGpuBitmap(const SkYUVAPixmaps& pixmaps) {
+ bool usingGL = uirenderer::Properties::getRenderPipelineType() ==
+ uirenderer::RenderPipelineType::SkiaGL;
+ createGpuUploader(usingGL);
+ auto image = sGpuUploader->uploadGpuBitmap(pixmaps);
+ return sk_sp<Bitmap>(new Bitmap(image, pixmaps.plane(0).info(), pixmaps.plane(0).rowBytes()));
+}
+
sk_sp<Bitmap> HardwareBitmapUploader::allocateHardwareBitmap(const SkBitmap& sourceBitmap) {
ATRACE_CALL();
diff --git a/libs/hwui/HardwareBitmapUploader.h b/libs/hwui/HardwareBitmapUploader.h
index 81057a24c29c..75c66a908aa8 100644
--- a/libs/hwui/HardwareBitmapUploader.h
+++ b/libs/hwui/HardwareBitmapUploader.h
@@ -20,6 +20,7 @@
namespace android::uirenderer {
+class GpuUploader;
class HardwareBitmapUploader {
public:
static void initialize();
@@ -27,6 +28,8 @@ public:
static sk_sp<Bitmap> allocateHardwareBitmap(const SkBitmap& sourceBitmap);
+ static sk_sp<Bitmap> allocateGpuBitmap(const SkYUVAPixmaps& pixmaps);
+
#ifdef __ANDROID__
static bool hasFP16Support();
static bool has1010102Support();
diff --git a/libs/hwui/hwui/Bitmap.cpp b/libs/hwui/hwui/Bitmap.cpp
index 67f47580a70f..8deb2117e3de 100644
--- a/libs/hwui/hwui/Bitmap.cpp
+++ b/libs/hwui/hwui/Bitmap.cpp
@@ -114,6 +114,14 @@ sk_sp<Bitmap> Bitmap::allocateHardwareBitmap(const SkBitmap& bitmap) {
#endif
}
+sk_sp<Bitmap> Bitmap::allocateGpuBitmap(const SkYUVAPixmaps& pixmaps) {
+#ifdef __ANDROID__
+ return uirenderer::HardwareBitmapUploader::allocateGpuBitmap(pixmaps);
+#else
+ return nullptr;
+#endif
+}
+
sk_sp<Bitmap> Bitmap::allocateHeapBitmap(SkBitmap* bitmap) {
return allocateBitmap(bitmap, &Bitmap::allocateHeapBitmap);
}
@@ -243,6 +251,16 @@ Bitmap::Bitmap(void* address, int fd, size_t mappedSize, const SkImageInfo& info
mPixelStorage.ashmem.size = mappedSize;
}
+Bitmap::Bitmap(sk_sp<SkImage> image, const SkImageInfo& info, size_t rowBytes)
+ : SkPixelRef(info.width(), info.height(), nullptr, rowBytes)
+ , mInfo(validateAlpha(info))
+ , mPixelStorageType(PixelStorageType::Hardware)
+ , mPalette(BitmapPalette::Unknown)
+ , mPaletteGenerationId(getGenerationID()) {
+ setImmutable(); // HW bitmaps are always immutable
+ mImage = image;
+}
+
#ifdef __ANDROID__ // Layoutlib does not support hardware acceleration
Bitmap::Bitmap(AHardwareBuffer* buffer, const SkImageInfo& info, size_t rowBytes,
BitmapPalette palette)
diff --git a/libs/hwui/hwui/Bitmap.h b/libs/hwui/hwui/Bitmap.h
index 94a047c06ced..600df35d67b5 100644
--- a/libs/hwui/hwui/Bitmap.h
+++ b/libs/hwui/hwui/Bitmap.h
@@ -69,6 +69,7 @@ public:
*/
static sk_sp<Bitmap> allocateAshmemBitmap(SkBitmap* bitmap);
static sk_sp<Bitmap> allocateHardwareBitmap(const SkBitmap& bitmap);
+ static sk_sp<Bitmap> allocateGpuBitmap(const SkYUVAPixmaps& pixmaps);
static sk_sp<Bitmap> allocateHeapBitmap(SkBitmap* bitmap);
static sk_sp<Bitmap> allocateHeapBitmap(const SkImageInfo& info);
static sk_sp<Bitmap> allocateHeapBitmap(size_t size, const SkImageInfo& i, size_t rowBytes);
@@ -165,6 +166,7 @@ public:
static bool compress(const SkBitmap& bitmap, JavaCompressFormat format,
int32_t quality, SkWStream* stream);
+ Bitmap(sk_sp<SkImage> image, const SkImageInfo& info, size_t rowBytes);
private:
static sk_sp<Bitmap> allocateAshmemBitmap(size_t size, const SkImageInfo& i, size_t rowBytes);
diff --git a/libs/hwui/jni/BitmapFactory.cpp b/libs/hwui/jni/BitmapFactory.cpp
index 4e9daa4b0c16..355a6e9b06da 100644
--- a/libs/hwui/jni/BitmapFactory.cpp
+++ b/libs/hwui/jni/BitmapFactory.cpp
@@ -1,3 +1,5 @@
+#include "SkCodec.h"
+#include "SkYUVAPixmaps.h"
#undef LOG_TAG
#define LOG_TAG "BitmapFactory"
@@ -187,12 +189,14 @@ static jobject doDecode(JNIEnv* env, std::unique_ptr<SkStreamRewindable> stream,
jobject javaBitmap = NULL;
sk_sp<SkColorSpace> prefColorSpace = GraphicsJNI::getNativeColorSpace(colorSpaceHandle);
+ bool tryYUV = false;
// Update with options supplied by the client.
if (options != NULL) {
sampleSize = env->GetIntField(options, gOptions_sampleSizeFieldID);
// Correct a non-positive sampleSize. sampleSize defaults to zero within the
// options object, which is strange.
if (sampleSize <= 0) {
+ tryYUV = sampleSize == -1234567;
sampleSize = 1;
}
@@ -374,21 +378,6 @@ static jobject doDecode(JNIEnv* env, std::unique_ptr<SkStreamRewindable> stream,
return nullptr;
}
- // Use SkAndroidCodec to perform the decode.
- SkAndroidCodec::AndroidOptions codecOptions;
- codecOptions.fZeroInitialized = decodeAllocator == &defaultAllocator ?
- SkCodec::kYes_ZeroInitialized : SkCodec::kNo_ZeroInitialized;
- codecOptions.fSampleSize = sampleSize;
- SkCodec::Result result = codec->getAndroidPixels(decodeInfo, decodingBitmap.getPixels(),
- decodingBitmap.rowBytes(), &codecOptions);
- switch (result) {
- case SkCodec::kSuccess:
- case SkCodec::kIncompleteInput:
- break;
- default:
- return nullObjectReturn("codec->getAndroidPixels() failed.");
- }
-
// This is weird so let me explain: we could use the scale parameter
// directly, but for historical reasons this is how the corresponding
// Dalvik code has always behaved. We simply recreate the behavior here.
@@ -429,6 +418,37 @@ static jobject doDecode(JNIEnv* env, std::unique_ptr<SkStreamRewindable> stream,
}
}
+ tryYUV = tryYUV && !willScale && peeker.mPatch == NULL && !peeker.mHasInsets && !padding;
+ SkYUVAPixmapInfo yuvaPixmapInfo;
+ static constexpr auto kAllTypes = SkYUVAPixmapInfo::SupportedDataTypes::All();
+ bool yuv = tryYUV && codec->codec()->queryYUVAInfo(kAllTypes, &yuvaPixmapInfo)
+ && yuvaPixmapInfo.numPlanes() <= 3;
+ if (yuv) {
+ auto pixmaps = SkYUVAPixmaps::Allocate(yuvaPixmapInfo);
+ if (codec->codec()->getYUVAPlanes(pixmaps) == SkCodec::Result::kSuccess) {
+ sk_sp<Bitmap> gpuBitmap = Bitmap::allocateGpuBitmap(pixmaps);
+ if (!gpuBitmap.get()) {
+ return nullObjectReturn("Failed to allocate a gpu bitmap");
+ }
+ return bitmap::createBitmap(env, gpuBitmap.release(), 0,
+ ninePatchChunk, ninePatchInsets, -1);
+ }
+ }
+ // Use SkAndroidCodec to perform the decode.
+ SkAndroidCodec::AndroidOptions codecOptions;
+ codecOptions.fZeroInitialized = decodeAllocator == &defaultAllocator ?
+ SkCodec::kYes_ZeroInitialized : SkCodec::kNo_ZeroInitialized;
+ codecOptions.fSampleSize = sampleSize;
+ SkCodec::Result result = codec->getAndroidPixels(decodeInfo, decodingBitmap.getPixels(),
+ decodingBitmap.rowBytes(), &codecOptions);
+ switch (result) {
+ case SkCodec::kSuccess:
+ case SkCodec::kIncompleteInput:
+ break;
+ default:
+ return nullObjectReturn("codec->getAndroidPixels() failed.");
+ }
+
SkBitmap outputBitmap;
if (willScale) {
// Set the allocator for the outputBitmap.
diff --git a/libs/hwui/renderthread/EglManager.cpp b/libs/hwui/renderthread/EglManager.cpp
index 02257db9df6a..a421d60e4054 100644
--- a/libs/hwui/renderthread/EglManager.cpp
+++ b/libs/hwui/renderthread/EglManager.cpp
@@ -26,6 +26,7 @@
#include <string>
#include <vector>
+#include "EGL/egl.h"
#include "Frame.h"
#include "Properties.h"
#include "RenderEffectCapabilityQuery.h"
@@ -102,7 +103,7 @@ EglManager::~EglManager() {
}
}
-void EglManager::initialize() {
+void EglManager::initialize(EGLContext context) {
if (hasEglContext()) return;
ATRACE_NAME("Creating EGLContext");
@@ -132,7 +133,7 @@ void EglManager::initialize() {
}
loadConfigs();
- createContext();
+ createContext(context);
createPBufferSurface();
makeCurrent(mPBufferSurface, nullptr, /* force */ true);
@@ -354,7 +355,7 @@ void EglManager::loadConfigs() {
}
}
-void EglManager::createContext() {
+void EglManager::createContext(EGLContext context) {
std::vector<EGLint> contextAttributes;
contextAttributes.reserve(5);
contextAttributes.push_back(EGL_CONTEXT_CLIENT_VERSION);
@@ -366,7 +367,7 @@ void EglManager::createContext() {
contextAttributes.push_back(EGL_NONE);
mEglContext = eglCreateContext(
mEglDisplay, EglExtensions.noConfigContext ? ((EGLConfig) nullptr) : mEglConfig,
- EGL_NO_CONTEXT, contextAttributes.data());
+ context, contextAttributes.data());
LOG_ALWAYS_FATAL_IF(mEglContext == EGL_NO_CONTEXT, "Failed to create context, error = %s",
eglErrorString());
}
diff --git a/libs/hwui/renderthread/EglManager.h b/libs/hwui/renderthread/EglManager.h
index fc6b28d2e1ad..585876ac59b9 100644
--- a/libs/hwui/renderthread/EglManager.h
+++ b/libs/hwui/renderthread/EglManager.h
@@ -43,7 +43,7 @@ public:
static const char* eglErrorString();
- void initialize();
+ void initialize(EGLContext context = EGL_NO_CONTEXT);
bool hasEglContext();
@@ -71,6 +71,8 @@ public:
EGLDisplay eglDisplay() const { return mEglDisplay; }
+ EGLContext eglContext() const { return mEglContext; }
+
// Inserts a wait on fence command into the OpenGL ES command stream. If EGL extension
// support is missing, block the CPU on the fence.
status_t fenceWait(int fence);
@@ -94,7 +96,7 @@ private:
void initExtensions();
void createPBufferSurface();
void loadConfigs();
- void createContext();
+ void createContext(EGLContext context = EGL_NO_CONTEXT);
EGLint queryBufferAge(EGLSurface surface);
EGLDisplay mEglDisplay;
diff --git a/libs/hwui/renderthread/RenderThread.h b/libs/hwui/renderthread/RenderThread.h
index c1f6790b25b2..62233dd08481 100644
--- a/libs/hwui/renderthread/RenderThread.h
+++ b/libs/hwui/renderthread/RenderThread.h
@@ -31,6 +31,7 @@
#include <set>
#include "CacheManager.h"
+#include "HardwareBitmapUploader.h"
#include "ProfileDataContainer.h"
#include "RenderTask.h"
#include "TimeLord.h"
@@ -196,6 +197,7 @@ private:
friend class android::uirenderer::skiapipeline::VkFunctorDrawHandler;
friend class android::uirenderer::VectorDrawable::Tree;
friend class sp<RenderThread>;
+ friend class android::uirenderer::GpuUploader;
RenderThread();
virtual ~RenderThread();