This commit is contained in:
lunaticbum 2026-04-11 17:32:14 +09:00
parent 2327326cca
commit d0422fafeb
14 changed files with 2300 additions and 133 deletions

View File

@ -93,7 +93,7 @@
<activity
android:name=".home.NeoRssActivity"
android:theme="@style/Theme.LunarLauncher.Starting"
android:theme="@style/Theme.Player"
android:launchMode="singleInstance"
android:screenOrientation="userPortrait"
android:configChanges="orientation|keyboardHidden|keyboard|screenSize|smallestScreenSize|screenLayout|layoutDirection|navigation"
@ -122,10 +122,11 @@
android:launchMode="singleInstance"
android:configChanges="orientation|screenSize|screenLayout|smallestScreenSize"
android:screenOrientation="sensor"
android:hardwareAccelerated="true"
android:exported="false">
</activity>
<activity
android:name=".LauncherActivity"
android:theme="@style/Theme.LunarLauncher.Starting"

View File

@ -41,6 +41,7 @@ add_library(native_renderer SHARED
MediaAsset.cpp
native_player.cpp
PlayerEngine.cpp
sonic.c
)

View File

@ -0,0 +1,74 @@
//
// Created by JIBUM HAN on 2026. 4. 11..
//
#pragma once
#include <queue>
#include <mutex>
#include <condition_variable>
extern "C" {
#include <libavcodec/avcodec.h>
}
/// Thread-safe FIFO of demuxed AVPackets shared between the demux (producer)
/// thread and a decoder (consumer) thread. The queue owns every packet it
/// holds; pop() transfers ownership back to the caller.
class PacketQueue {
private:
    std::queue<AVPacket*> queue_;      // queued packets, owned by the queue
    std::mutex mutex_;
    std::condition_variable cond_;
    int sizeBytes_ = 0;                // running total of pkt->size, for buffer-level checks
    bool abortRequest_ = false;        // set on player shutdown to wake threads blocked in pop()

public:
    /// Wake every thread blocked in pop() and make push()/pop() return immediately.
    void abort() {
        std::lock_guard<std::mutex> lock(mutex_);
        abortRequest_ = true;
        cond_.notify_all();
    }

    /// Re-arm the queue after abort() so push()/pop() operate normally again.
    void start() {
        std::lock_guard<std::mutex> lock(mutex_);
        abortRequest_ = false;
    }

    /// Move the payload of `pkt` into the queue (ownership transfers to the queue,
    /// `pkt` is left blank). Returns false — leaving `pkt` untouched — when the
    /// queue is aborted or the internal allocation fails.
    bool push(AVPacket* pkt) {
        std::lock_guard<std::mutex> lock(mutex_);
        if (abortRequest_) return false;
        AVPacket* pkt1 = av_packet_alloc();
        if (!pkt1) return false;  // OOM: don't deref nullptr in av_packet_move_ref
        av_packet_move_ref(pkt1, pkt);
        queue_.push(pkt1);
        sizeBytes_ += pkt1->size;
        cond_.notify_one(); // wake a decoder thread waiting for data
        return true;
    }

    /// Block until a packet is available or abort() is called. Returns nullptr
    /// on abort; otherwise the caller owns the packet and must av_packet_free() it.
    AVPacket* pop() {
        std::unique_lock<std::mutex> lock(mutex_);
        cond_.wait(lock, [this]() { return !queue_.empty() || abortRequest_; });
        if (abortRequest_ || queue_.empty()) return nullptr;
        AVPacket* pkt = queue_.front();
        queue_.pop();
        sizeBytes_ -= pkt->size;
        return pkt;
    }

    /// Drop and free every queued packet (used on seek and on shutdown).
    void flush() {
        std::lock_guard<std::mutex> lock(mutex_);
        while (!queue_.empty()) {
            AVPacket* pkt = queue_.front();
            queue_.pop();
            av_packet_free(&pkt);
        }
        sizeBytes_ = 0;
    }

    /// Total payload bytes currently buffered (for back-pressure decisions).
    int getSizeBytes() {
        std::lock_guard<std::mutex> lock(mutex_);
        return sizeBytes_;
    }
};

View File

@ -10,6 +10,8 @@
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__)
const int MIN_BUFFER_SIZE = 2 * 1024 * 1024;
static int custom_read_packet(void *opaque, uint8_t *buf, int buf_size) {
int fd = (int)(intptr_t)opaque;
int ret = read(fd, buf, buf_size);
@ -143,6 +145,10 @@ void PlayerEngine::prepareInternal() {
// 💡 1. 버퍼 크기를 가용 가능한 최대치로 늘려서 비디오 렌더링 딜레이에 대비합니다.
AAudioStream_setBufferSizeInFrames(audio_stream_, AAudioStream_getBufferCapacityInFrames(audio_stream_));
// 🚀 Sonic 초기화 추가 (Sample Rate 48000, Channels 2)
sonic_stream_ = sonicCreateStream(48000, 2);
sonicSetSpeed(sonic_stream_, 1.0f); // 초기 배속 1.0
// 💡 2. 여기서 호출하던 AAudioStream_requestStart(audio_stream_); 를 삭제합니다! (대기 상태로 둠)
AAudioStreamBuilder_delete(builder);
@ -156,23 +162,26 @@ void PlayerEngine::prepareInternal() {
void PlayerEngine::play(ANativeWindow* window) {
if (!isPrepared_) return;
if (isPlaying_) {
isPaused_ = false;
// 💡 일시정지가 풀릴 때 오디오도 다시 시작
if (audio_stream_) AAudioStream_requestStart(audio_stream_);
return;
}
if (isPlaying_) return;
window_ = window;
ANativeWindow_acquire(window_);
isPlaying_ = true;
isPaused_ = false;
abortRequest_ = false;
videoQueue_.start();
audioQueue_.start();
// 💡 처음 재생을 시작할 때 비로소 오디오 엔진을 가동합니다.
if (audio_stream_) AAudioStream_requestStart(audio_stream_);
renderThread_ = std::thread(&PlayerEngine::renderLoop, this);
// 🚀 3개의 스레드 동시 가동
readThread_ = std::thread(&PlayerEngine::readThreadLoop, this);
videoThread_ = std::thread(&PlayerEngine::videoThreadLoop, this);
audioThread_ = std::thread(&PlayerEngine::audioThreadLoop, this);
}
void PlayerEngine::pause() {
isPaused_ = true;
// 💡 영상이 일시정지되면 오디오 버퍼도 소모되지 않게 멈춰줍니다.
@ -182,7 +191,22 @@ void PlayerEngine::pause() {
void PlayerEngine::stop() {
if (!isPlaying_) return;
isPlaying_ = false;
if (renderThread_.joinable()) renderThread_.join();
abortRequest_ = true;
// 큐를 깨워서 멈춰있는 스레드들을 탈출시킴
videoQueue_.abort();
audioQueue_.abort();
if (readThread_.joinable()) readThread_.join();
if (videoThread_.joinable()) videoThread_.join();
if (audioThread_.joinable()) audioThread_.join();
videoQueue_.flush();
audioQueue_.flush();
if (sonic_stream_) {
sonicDestroyStream(sonic_stream_);
sonic_stream_ = nullptr;
}
if (window_) { ANativeWindow_release(window_); window_ = nullptr; }
}
@ -191,7 +215,21 @@ void PlayerEngine::seekBy(double seconds) {
seekReq_ = true;
}
/// Update the playback speed. Non-positive requests are clamped to 1.0.
/// Returning to normal speed triggers a flush-seek so the audio stream snaps
/// back onto the current video position instead of draining stale fast data.
void PlayerEngine::setSpeed(float speed) {
    const float newSpeed = speed > 0.0f ? speed : 1.0f; // clamp before publishing
    const float oldSpeed = playbackSpeed_.load();
    playbackSpeed_ = newSpeed;
    // Only the transition back to 1.0x needs special handling: reuse the seek
    // path to force a flush and pin audio to the current video position.
    if (oldSpeed != 1.0f && newSpeed == 1.0f) {
        isAbsoluteSeek_ = true;
        seekTargetSec_ = currentPosSec_;
        seekReq_ = true; // set last so the read thread sees a consistent target
    }
    if (sonic_stream_) {
        sonicSetSpeed(sonic_stream_, newSpeed);
    }
}
void PlayerEngine::sendSubtitleToKotlin(const char* text) {
if (!jvm_ || !listenerObj_ || !subtitleMethodId_ || !text) return;
@ -203,98 +241,382 @@ void PlayerEngine::sendSubtitleToKotlin(const char* text) {
if (attached) jvm_->DetachCurrentThread();
}
void PlayerEngine::renderLoop() {
LOGI("Player render loop started (AAudio Mode)");
AVFrame* frame = av_frame_alloc();
AVPacket* pkt = av_packet_alloc();
int last_win_w = 0, last_win_h = 0;
double PlayerEngine::getDuration() const {
if (fmt_ctx_ && fmt_ctx_->duration != AV_NOPTS_VALUE) {
return (double)fmt_ctx_->duration / AV_TIME_BASE;
}
return 0.0;
}
while (isPlaying_) {
if (isPaused_) {
std::this_thread::sleep_for(std::chrono::milliseconds(20));
// Request an absolute seek; the read thread performs the actual av_seek_frame.
// The target fields are written before seekReq_ so the read thread never
// observes a raised request with a stale target.
void PlayerEngine::seekTo(double seconds) {
    seekTargetSec_ = seconds;
    isAbsoluteSeek_ = true;
    seekReq_ = true;
}
//void PlayerEngine::renderLoop() {
// LOGI("🚀 Player render loop started (AV Sync & Performance Optimized)");
//
// AVFrame* frame = av_frame_alloc();
// AVPacket* pkt = av_packet_alloc();
// int last_win_w = 0, last_win_h = 0;
//
// // 1. 영상의 실제 FPS 정보를 기반으로 프레임당 지연 시간(마이크로초) 계산
// double fps = av_q2d(fmt_ctx_->streams[video_stream_idx_]->avg_frame_rate);
// if (fps <= 0) fps = 23.976; // 정보가 없을 경우 영화 표준 프레임 레이트 적용
// int64_t frame_delay_us = (int64_t)(1000000.0 / fps);
//
// while (isPlaying_) {
// // 일시정지 처리
// if (isPaused_) {
// std::this_thread::sleep_for(std::chrono::milliseconds(20));
// continue;
// }
//
// // Seek(탐색) 요청 처리
// if (seekReq_) {
// int64_t seek_target;
// if (isAbsoluteSeek_) {
// seek_target = (int64_t)(seekTargetSec_ * AV_TIME_BASE);
// } else {
// seek_target = (int64_t)((currentPosSec_ + seekTargetOffset_) * AV_TIME_BASE);
// }
//
// av_seek_frame(fmt_ctx_, -1, seek_target, AVSEEK_FLAG_BACKWARD);
//
// if (video_codec_ctx_) avcodec_flush_buffers(video_codec_ctx_);
// if (audio_codec_ctx_) avcodec_flush_buffers(audio_codec_ctx_);
//
// if (audio_stream_) {
// AAudioStream_requestPause(audio_stream_);
// AAudioStream_requestFlush(audio_stream_);
// AAudioStream_requestStart(audio_stream_);
// }
// seekReq_ = false;
// isAbsoluteSeek_ = false; // 플래그 초기화
// }
//
// float currentSpeed = playbackSpeed_.load();
//
// // 패킷 읽기
// if (av_read_frame(fmt_ctx_, pkt) < 0) break;
//
// // --- 비디오 스트림 처리 ---
// if (pkt->stream_index == video_stream_idx_) {
// // 💡 프레임 렌더링 시작 시간 기록
// auto frame_start = std::chrono::high_resolution_clock::now();
//
// avcodec_send_packet(video_codec_ctx_, pkt);
// while (avcodec_receive_frame(video_codec_ctx_, frame) == 0) {
// // 현재 재생 시간(초) 업데이트
// currentPosSec_ = frame->pts * av_q2d(fmt_ctx_->streams[video_stream_idx_]->time_base);
//
// if (window_) {
// int w = ANativeWindow_getWidth(window_), h = ANativeWindow_getHeight(window_);
// if (!sws_ctx_ || w != last_win_w || h != last_win_h) {
// if (sws_ctx_) sws_freeContext(sws_ctx_);
// sws_ctx_ = sws_getContext(frame->width, frame->height, video_codec_ctx_->pix_fmt, w, h, AV_PIX_FMT_RGBA, SWS_BILINEAR, nullptr, nullptr, nullptr);
// last_win_w = w; last_win_h = h;
// }
//
// ANativeWindow_Buffer buffer;
// if (ANativeWindow_lock(window_, &buffer, nullptr) == 0) {
// uint8_t* dst_data[4] = { (uint8_t*)buffer.bits, nullptr, nullptr, nullptr };
// int dst_line[4] = { buffer.stride * 4, 0, 0, 0 };
// if (sws_ctx_) sws_scale(sws_ctx_, frame->data, frame->linesize, 0, frame->height, dst_data, dst_line);
// ANativeWindow_unlockAndPost(window_);
// }
// }
//
// // 💡 [핵심 동기화] 연산에 소요된 시간을 제외한 나머지 시간만 대기
// auto frame_end = std::chrono::high_resolution_clock::now();
// int64_t actual_render_time = std::chrono::duration_cast<std::chrono::microseconds>(frame_end - frame_start).count();
//
// // 배속(currentSpeed)을 고려한 대기 시간 계산
// int64_t wait_time = (int64_t)((frame_delay_us / currentSpeed) - actual_render_time);
//
// if (wait_time > 0) {
// std::this_thread::sleep_for(std::chrono::microseconds(wait_time));
// }
// }
// }
// // --- 오디오 스트림 처리 ---
// else if (pkt->stream_index == audio_stream_idx_) {
// // 정속 재생(1.0)일 때만 오디오 재생 (오디오 배속은 로직이 복잡하므로 정속 위주 최적화)
// if (currentSpeed == 1.0f) {
// avcodec_send_packet(audio_codec_ctx_, pkt);
// while (avcodec_receive_frame(audio_codec_ctx_, frame) == 0) {
// if (swr_ctx_ && audio_stream_) {
// int out_samples = swr_get_out_samples(swr_ctx_, frame->nb_samples);
// uint8_t* out_buf = (uint8_t*)malloc(out_samples * 4);
// int converted_samples = swr_convert(swr_ctx_, &out_buf, out_samples, (const uint8_t**)frame->data, frame->nb_samples);
//
// if (converted_samples > 0) {
// // 💡 [수정] 타임아웃을 0으로 설정하여 오디오 버퍼가 꽉 차도 비디오 스레드가 멈추지 않게 함
// AAudioStream_write(audio_stream_, out_buf, converted_samples, 0);
// }
// free(out_buf);
// }
// }
// }
// }
// // --- 자막 스트림 처리 ---
// else if (pkt->stream_index == current_sub_stream_idx_) {
// AVSubtitle sub; int got_sub = 0;
// avcodec_decode_subtitle2(sub_codec_ctx_, &sub, &got_sub, pkt);
// if (got_sub) {
// for (unsigned int i = 0; i < sub.num_rects; i++) {
// if (sub.rects[i]->type == SUBTITLE_TEXT && sub.rects[i]->text) sendSubtitleToKotlin(sub.rects[i]->text);
// else if (sub.rects[i]->type == SUBTITLE_ASS && sub.rects[i]->ass) sendSubtitleToKotlin(sub.rects[i]->ass);
// }
// avsubtitle_free(&sub);
// }
// }
// av_packet_unref(pkt);
// }
//
// // 자원 해제 로직
// if (audio_stream_) {
// AAudioStream_requestStop(audio_stream_);
// AAudioStream_close(audio_stream_);
// audio_stream_ = nullptr;
// }
// av_frame_free(&frame);
// av_packet_free(&pkt);
// if (swr_ctx_) swr_free(&swr_ctx_);
// if (sws_ctx_) sws_freeContext(sws_ctx_);
// if (video_codec_ctx_) avcodec_free_context(&video_codec_ctx_);
// if (audio_codec_ctx_) avcodec_free_context(&audio_codec_ctx_);
// if (fmt_ctx_) avformat_close_input(&fmt_ctx_);
//
// LOGI("🏁 Player loop finished gracefully.");
//}
/// Demux (producer) thread: pulls packets from the container and routes them
/// into the video/audio queues, handling seek requests and back-pressure.
void PlayerEngine::readThreadLoop() {
    AVPacket* pkt = av_packet_alloc();
    const int MAX_QUEUE_SIZE = 15 * 1024 * 1024; // cap on combined queued payload

    while (!abortRequest_) {
        // --- Seek handling ---
        if (seekReq_) {
            // 1. Resolve the target: absolute position or offset from current.
            double targetSec = isAbsoluteSeek_ ? seekTargetSec_.load()
                                               : (currentPosSec_ + seekTargetOffset_.load());
            int64_t seek_target = (int64_t)(targetSec * AV_TIME_BASE);

            // 2. Perform the FFmpeg seek.
            av_seek_frame(fmt_ctx_, -1, seek_target, AVSEEK_FLAG_BACKWARD);

            // 3. Discard every stale packet already queued.
            videoQueue_.flush();
            audioQueue_.flush();

            // 4. Decoder threads flush their own codec/Sonic/AAudio state when
            //    they observe these flags (avoids touching codecs cross-thread).
            videoCodecFlushReq_ = true;
            audioCodecFlushReq_ = true;

            // 5. Force the master clock forward; otherwise the video thread
            //    stalls comparing new frames against the stale audio clock.
            audioClock_ = targetSec;
            currentPosSec_ = targetSec;

            seekReq_ = false;
            isAbsoluteSeek_ = false;
            continue; // resume reading from the new position
        }

        // Back-pressure: idle while paused or while the queues are full.
        if (isPaused_ || (videoQueue_.getSizeBytes() + audioQueue_.getSizeBytes() > MAX_QUEUE_SIZE)) {
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
            continue;
        }

        if (av_read_frame(fmt_ctx_, pkt) < 0) {
            // EOF (or read error): idle instead of exiting so a later seek can resume.
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
            continue;
        }

        if (pkt->stream_index == video_stream_idx_) {
            videoQueue_.push(pkt);
        } else if (pkt->stream_index == audio_stream_idx_) {
            audioQueue_.push(pkt);
        }
        av_packet_unref(pkt); // the queue took ownership via move; drop our reference
    }
    av_packet_free(&pkt);
}
void PlayerEngine::audioThreadLoop() {
AVFrame* frame = av_frame_alloc();
const int MIN_BUFFER_SIZE = 2 * 1024 * 1024;
bool isBuffering = true;
const int MAX_SONIC_SAMPLES = 8192;
short* sonic_out_buf = new short[MAX_SONIC_SAMPLES * 2];
while (!abortRequest_) {
if (isPaused_) {
std::this_thread::sleep_for(std::chrono::milliseconds(10));
continue;
}
int currentAudioSize = audioQueue_.getSizeBytes();
int currentVideoSize = videoQueue_.getSizeBytes();
int totalQueueSize = currentAudioSize + currentVideoSize;
// 💡 배속 중이든 아니든 이제 무조건 정상적인 버퍼링 로직을 탑니다.
if (totalQueueSize < MIN_BUFFER_SIZE / 2) isBuffering = true;
else if (totalQueueSize >= MIN_BUFFER_SIZE) isBuffering = false;
if (isBuffering && !abortRequest_) {
std::this_thread::sleep_for(std::chrono::milliseconds(10));
continue;
}
AVPacket* pkt = audioQueue_.pop();
if (!pkt) break;
// 💡 [추가] 오디오도 스스로 코덱과 Sonic 버퍼를 비웁니다.
if (audioCodecFlushReq_) {
avcodec_flush_buffers(audio_codec_ctx_);
if (sonic_stream_) {
sonicFlushStream(sonic_stream_);
}
// AAudio의 틱 잡음을 막기 위해 재생 중인 버퍼도 비워줍니다.
if (audio_stream_) {
// 💡 안전한 버퍼 초기화를 위해 정지 -> 비움 -> 재시작 순서로 호출
AAudioStream_requestPause(audio_stream_);
AAudioStream_requestFlush(audio_stream_);
AAudioStream_requestStart(audio_stream_);
}
seekReq_ = false;
audioCodecFlushReq_ = false;
}
float currentSpeed = playbackSpeed_.load();
if (av_read_frame(fmt_ctx_, pkt) < 0) break;
avcodec_send_packet(audio_codec_ctx_, pkt);
while (avcodec_receive_frame(audio_codec_ctx_, frame) == 0) {
if (pkt->stream_index == video_stream_idx_) {
avcodec_send_packet(video_codec_ctx_, pkt);
while (avcodec_receive_frame(video_codec_ctx_, frame) == 0) {
currentPosSec_ = frame->pts * av_q2d(fmt_ctx_->streams[video_stream_idx_]->time_base);
if (window_) {
int w = ANativeWindow_getWidth(window_), h = ANativeWindow_getHeight(window_);
if (!sws_ctx_ || w != last_win_w || h != last_win_h) {
if (sws_ctx_) sws_freeContext(sws_ctx_);
sws_ctx_ = sws_getContext(frame->width, frame->height, video_codec_ctx_->pix_fmt, w, h, AV_PIX_FMT_RGBA, SWS_BILINEAR, nullptr, nullptr, nullptr);
last_win_w = w; last_win_h = h;
}
ANativeWindow_Buffer buffer;
if (ANativeWindow_lock(window_, &buffer, nullptr) == 0) {
uint8_t* dst_data[4] = { (uint8_t*)buffer.bits, nullptr, nullptr, nullptr };
int dst_line[4] = { buffer.stride * 4, 0, 0, 0 };
if (sws_ctx_) sws_scale(sws_ctx_, frame->data, frame->linesize, 0, frame->height, dst_data, dst_line);
ANativeWindow_unlockAndPost(window_);
}
}
// 마스터 시계 갱신
if (frame->pts != AV_NOPTS_VALUE) {
audioClock_ = frame->pts * av_q2d(fmt_ctx_->streams[audio_stream_idx_]->time_base);
}
if (currentSpeed != 1.0f || audio_stream_idx_ < 0) {
std::this_thread::sleep_for(std::chrono::milliseconds((int)(16/currentSpeed)));
}
}
else if (pkt->stream_index == audio_stream_idx_) {
if (currentSpeed == 1.0f) {
avcodec_send_packet(audio_codec_ctx_, pkt);
while (avcodec_receive_frame(audio_codec_ctx_, frame) == 0) {
if (swr_ctx_ && audio_stream_) {
int out_samples = swr_get_out_samples(swr_ctx_, frame->nb_samples);
uint8_t* out_buf = (uint8_t*)malloc(out_samples * 4);
int converted_samples = swr_convert(swr_ctx_, &out_buf, out_samples, (const uint8_t**)frame->data, frame->nb_samples);
if (converted_samples > 0) {
AAudioStream_write(audio_stream_, out_buf, converted_samples, 1000000000);
if (swr_ctx_ && audio_stream_) {
int out_samples = swr_get_out_samples(swr_ctx_, frame->nb_samples);
uint8_t* out_buf = (uint8_t*)malloc(out_samples * 4); // S16 2채널 = 4바이트
// 1. FFmpeg으로 PCM 변환 (정배속 데이터)
int converted = swr_convert(swr_ctx_, &out_buf, out_samples, (const uint8_t**)frame->data, frame->nb_samples);
if (converted > 0 && sonic_stream_ && !abortRequest_) {
// 🚀 2. 변환된 데이터를 Sonic 스트림에 밀어넣습니다 (입력)
sonicWriteShortToStream(sonic_stream_, (short*)out_buf, converted);
// 🚀 3. Sonic 스트림에서 배속 처리 완료된 데이터를 뽑아냅니다 (출력)
int samplesRead;
do {
// 가공된 데이터 꺼내기 (반환값: 꺼낸 샘플 수)
samplesRead = sonicReadShortFromStream(sonic_stream_, sonic_out_buf, MAX_SONIC_SAMPLES);
if (samplesRead > 0 && !abortRequest_) {
// 4. 가공된 최종 데이터를 사운드 카드(AAudio)에 씁니다!
int32_t framesLeft = samplesRead;
short* currentAAudioBuf = sonic_out_buf;
while (framesLeft > 0 && !abortRequest_) {
int32_t framesWritten = AAudioStream_write(audio_stream_, currentAAudioBuf, framesLeft, 1000000000);
if (framesWritten < 0) break;
framesLeft -= framesWritten;
currentAAudioBuf += framesWritten * 2; // 2채널 전진
}
}
free(out_buf);
}
} while (samplesRead > 0); // Sonic에 꺼낼 데이터가 없을 때까지 반복
}
free(out_buf);
}
}
else if (pkt->stream_index == current_sub_stream_idx_) {
AVSubtitle sub; int got_sub = 0;
avcodec_decode_subtitle2(sub_codec_ctx_, &sub, &got_sub, pkt);
if (got_sub) {
for (unsigned int i = 0; i < sub.num_rects; i++) {
if (sub.rects[i]->type == SUBTITLE_TEXT && sub.rects[i]->text) sendSubtitleToKotlin(sub.rects[i]->text);
else if (sub.rects[i]->type == SUBTITLE_ASS && sub.rects[i]->ass) sendSubtitleToKotlin(sub.rects[i]->ass);
}
avsubtitle_free(&sub);
}
}
av_packet_unref(pkt);
av_packet_free(&pkt);
}
delete[] sonic_out_buf;
av_frame_free(&frame);
}
/// Video consumer thread: decodes queued packets, syncs each frame against the
/// audio master clock (sleep when early, drop when late) and blits RGBA frames
/// into the ANativeWindow via swscale.
void PlayerEngine::videoThreadLoop() {
    AVFrame* frame = av_frame_alloc();
    int last_win_w = 0, last_win_h = 0;

    while (!abortRequest_) {
        if (isPaused_) {
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
            continue;
        }

        AVPacket* pkt = videoQueue_.pop(); // blocks until data or abort
        if (!pkt) break;

        // Honor a pending flush (raised by the read thread on seek) before decoding.
        if (videoCodecFlushReq_) {
            avcodec_flush_buffers(video_codec_ctx_);
            videoCodecFlushReq_ = false;
        }

        avcodec_send_packet(video_codec_ctx_, pkt);
        while (avcodec_receive_frame(video_codec_ctx_, frame) == 0) {
            double videoPtsSec = frame->pts * av_q2d(fmt_ctx_->streams[video_stream_idx_]->time_base);
            currentPosSec_ = videoPtsSec; // exposed for the UI progress bar

            // A/V sync: video slaves itself to the audio master clock.
            double delaySec = videoPtsSec - audioClock_.load();
            float currentSpeed = playbackSpeed_.load();
            if (delaySec > 0.01) {
                // Frame is ahead of the audio: sleep off the difference (speed-scaled).
                double waitTime = delaySec / currentSpeed;
                std::this_thread::sleep_for(std::chrono::microseconds((int64_t)(waitTime * 1000000.0)));
            }
            else if (delaySec < -0.05) {
                // Frame is behind the audio: drop it without rendering
                // (this is what keeps high-speed playback from overloading the renderer).
                continue;
            }

            // --- Render the frame ---
            if (window_) {
                int w = ANativeWindow_getWidth(window_), h = ANativeWindow_getHeight(window_);
                // (Re)build the scaler only when the surface size changes.
                if (!sws_ctx_ || w != last_win_w || h != last_win_h) {
                    if (sws_ctx_) sws_freeContext(sws_ctx_);
                    sws_ctx_ = sws_getContext(frame->width, frame->height, video_codec_ctx_->pix_fmt,
                                              w, h, AV_PIX_FMT_RGBA, SWS_BILINEAR, nullptr, nullptr, nullptr);
                    last_win_w = w; last_win_h = h;
                }
                ANativeWindow_Buffer buffer;
                if (ANativeWindow_lock(window_, &buffer, nullptr) == 0) {
                    uint8_t* dst_data[4] = { (uint8_t*)buffer.bits, nullptr, nullptr, nullptr };
                    int dst_line[4] = { buffer.stride * 4, 0, 0, 0 };
                    if (sws_ctx_) sws_scale(sws_ctx_, frame->data, frame->linesize, 0, frame->height, dst_data, dst_line);
                    ANativeWindow_unlockAndPost(window_);
                }
            }
        }
        av_packet_free(&pkt);
    }
    av_frame_free(&frame);
    // NOTE(review): shared resources (codec contexts, AAudio stream, fmt ctx,
    // swr/sws) are presumably released in stop()/the destructor — confirm. The
    // stale renderLoop cleanup tail that previously sat here referenced an
    // out-of-scope `pkt` and has been removed.
}
std::string PlayerEngine::getSubtitleTracks() { return subtitle_tracks_info_; }

View File

@ -1,10 +1,12 @@
#pragma once
#include "PacketQueue.h"
#include <string>
#include <thread>
#include <atomic>
#include <jni.h>
#include <android/native_window.h>
#include <aaudio/AAudio.h>
#include "sonic.h" // 💡 Sonic 헤더 추가
extern "C" {
#include <libavformat/avformat.h>
@ -31,8 +33,31 @@ public:
std::string getSubtitleTracks();
void setSubtitleTrack(int streamIndex);
double getDuration() const;
void seekTo(double seconds);
private:
void readThreadLoop(); // 패킷을 읽어서 큐에 넣는 스레드 (생산자)
void videoThreadLoop(); // 비디오 큐에서 꺼내서 렌더링하는 스레드 (소비자)
void audioThreadLoop(); // 오디오 큐에서 꺼내서 출력하는 스레드 (소비자)
std::atomic<bool> videoCodecFlushReq_{false};
std::atomic<bool> audioCodecFlushReq_{false};
PacketQueue videoQueue_;
PacketQueue audioQueue_;
std::thread readThread_;
std::thread videoThread_;
std::thread audioThread_;
std::atomic<bool> abortRequest_{false};
// 💡 [A/V Sync의 핵심] 현재 오디오가 어디까지 재생되었는지 기록하는 마스터 시계
std::atomic<double> audioClock_{0.0};
std::atomic<bool> isAbsoluteSeek_{false}; // 절대 위치 이동 여부 플래그
std::atomic<double> seekTargetSec_{0.0};
void prepareInternal(); // 💡 백그라운드 준비 스레드
void renderLoop();
void sendSubtitleToKotlin(const char* text);
@ -75,4 +100,7 @@ private:
jmethodID videoSizeMethodId_ = nullptr;
jmethodID preparedMethodId_ = nullptr; // 💡 JNI 콜백 ID 추가
jmethodID errorMethodId_ = nullptr; // 💡 JNI 에러 콜백 ID 추가
sonicStream sonic_stream_ = nullptr;
};

View File

@ -9,6 +9,18 @@ T* toPlayerNative(jlong handle) { return reinterpret_cast<T*>(handle); }
extern "C" {
JNIEXPORT jdouble JNICALL
Java_bums_lunatic_launcher_player_NativePlayer_nativeGetDuration(JNIEnv *env, jobject thiz, jlong handle) {
    // Resolve the native engine behind the Java-held handle; 0.0 for a null handle.
    auto* player = toPlayerNative<PlayerEngine>(handle);
    if (!player) return 0.0;
    return player->getDuration();
}
JNIEXPORT void JNICALL
Java_bums_lunatic_launcher_player_NativePlayer_nativeSeekTo(JNIEnv *env, jobject thiz, jlong handle, jdouble seconds) {
    // Forward the seek request to the engine owned by this handle (no-op on null).
    if (auto* player = toPlayerNative<PlayerEngine>(handle)) {
        player->seekTo(seconds);
    }
}
JNIEXPORT jlong JNICALL
Java_bums_lunatic_launcher_player_NativePlayer_nativeInit(JNIEnv *env, jobject thiz) {
PlayerEngine* engine = new PlayerEngine(g_vm, thiz);

1246
app/src/main/cpp/sonic.c Normal file

File diff suppressed because it is too large Load Diff

307
app/src/main/cpp/sonic.h Normal file
View File

@ -0,0 +1,307 @@
//
// Created by JIBUM HAN on 2026. 4. 11..
//
#ifndef SONIC_H_
#define SONIC_H_
/* Sonic library
Copyright 2010
Bill Cox
This file is part of the Sonic Library.
This file is licensed under the Apache 2.0 license.
*/
/*
The Sonic Library implements a new algorithm invented by Bill Cox for the
specific purpose of speeding up speech by high factors at high quality. It
generates smooth speech at speed up factors as high as 6X, possibly more. It is
also capable of slowing down speech, and generates high quality results
regardless of the speed up or slow down factor. For speeding up speech by 2X or
more, the following equation is used:
newSamples = period/(speed - 1.0)
scale = 1.0/newSamples;
where period is the current pitch period, determined using AMDF or any other
pitch estimator, and speed is the speedup factor. If the current position in
the input stream is pointed to by "samples", and the current output stream
position is pointed to by "out", then newSamples number of samples can be
generated with:
out[t] = (samples[t]*(newSamples - t) + samples[t + period]*t)/newSamples;
where t = 0 to newSamples - 1.
For speed factors < 2X, the PICOLA algorithm is used. The above
algorithm is first used to double the speed of one pitch period. Then, enough
input is directly copied from the input to the output to achieve the desired
speed up factor, where 1.0 < speed < 2.0. The amount of data copied is derived:
speed = (2*period + length)/(period + length)
speed*length + speed*period = 2*period + length
length(speed - 1) = 2*period - speed*period
length = period*(2 - speed)/(speed - 1)
For slowing down speech where 0.5 < speed < 1.0, a pitch period is inserted into
the output twice, and length of input is copied from the input to the output
until the output desired speed is reached. The length of data copied is:
length = period*(speed - 0.5)/(1 - speed)
For slow down factors below 0.5, no data is copied, and an algorithm
similar to high speed factors is used.
*/
/* Uncomment this to use sin-wav based overlap add which in theory can improve
sound quality slightly, at the expense of lots of floating point math. */
/* #define SONIC_USE_SIN */
#ifdef __cplusplus
extern "C" {
#endif
#ifdef SONIC_INTERNAL
/* The following #define's are used to change the names of the routines defined
* here so that a new library (i.e. speedy) can reuse these names, and then call
* the original names. We do this for two reasons: 1) we don't want to change
* the original API, and 2) we want to add a shim, using the original names and
* still call these routines.
*
* Original users of this API and the libsonic library need to do nothing. The
* original behavior remains.
*
* A new user that add some additional functionality above this library (a shim)
* should #define SONIC_INTERNAL before including this file, undefine all these
* symbols and call the sonicIntXXX functions directly.
*/
#define sonicCreateStream sonicIntCreateStream
#define sonicDestroyStream sonicIntDestroyStream
#define sonicWriteFloatToStream sonicIntWriteFloatToStream
#define sonicWriteShortToStream sonicIntWriteShortToStream
#define sonicWriteUnsignedCharToStream sonicIntWriteUnsignedCharToStream
#define sonicReadFloatFromStream sonicIntReadFloatFromStream
#define sonicReadShortFromStream sonicIntReadShortFromStream
#define sonicReadUnsignedCharFromStream sonicIntReadUnsignedCharFromStream
#define sonicFlushStream sonicIntFlushStream
#define sonicSamplesAvailable sonicIntSamplesAvailable
#define sonicGetSpeed sonicIntGetSpeed
#define sonicSetSpeed sonicIntSetSpeed
#define sonicGetPitch sonicIntGetPitch
#define sonicSetPitch sonicIntSetPitch
#define sonicGetRate sonicIntGetRate
#define sonicSetRate sonicIntSetRate
#define sonicGetVolume sonicIntGetVolume
#define sonicSetVolume sonicIntSetVolume
#define sonicGetQuality sonicIntGetQuality
#define sonicSetQuality sonicIntSetQuality
#define sonicGetSampleRate sonicIntGetSampleRate
#define sonicSetSampleRate sonicIntSetSampleRate
#define sonicGetNumChannels sonicIntGetNumChannels
#define sonicGetUserData sonicIntGetUserData
#define sonicSetUserData sonicIntSetUserData
#define sonicSetNumChannels sonicIntSetNumChannels
#define sonicChangeFloatSpeed sonicIntChangeFloatSpeed
#define sonicChangeShortSpeed sonicIntChangeShortSpeed
#define sonicEnableNonlinearSpeedup sonicIntEnableNonlinearSpeedup
#define sonicSetDurationFeedbackStrength sonicIntSetDurationFeedbackStrength
#define sonicComputeSpectrogram sonicIntComputeSpectrogram
#define sonicGetSpectrogram sonicIntGetSpectrogram
#endif /* SONIC_INTERNAL */
/* This specifies the range of voice pitches we try to match.
Note that if we go lower than 65, we could overflow in findPitchInRange */
#ifndef SONIC_MIN_PITCH
#define SONIC_MIN_PITCH 65
#endif /* SONIC_MIN_PITCH */
#ifndef SONIC_MAX_PITCH
#define SONIC_MAX_PITCH 400
#endif /* SONIC_MAX_PITCH */
/* The following values are used to clamp inputs such as speed to sane values.
*/
#define SONIC_MIN_VOLUME 0.01f
#define SONIC_MAX_VOLUME 100.0f
#define SONIC_MIN_SPEED 0.05f
#define SONIC_MAX_SPEED 20.0f
#define SONIC_MIN_PITCH_SETTING 0.05f
#define SONIC_MAX_PITCH_SETTING 20.0f
#define SONIC_MIN_RATE 0.05f
#define SONIC_MAX_RATE 20.0f
#define SONIC_MIN_SAMPLE_RATE 1000
#define SONIC_MAX_SAMPLE_RATE 500000
#define SONIC_MIN_CHANNELS 1
#define SONIC_MAX_CHANNELS 32
/* These are used to down-sample some inputs to improve speed */
#define SONIC_AMDF_FREQ 4000
struct sonicStreamStruct;
typedef struct sonicStreamStruct* sonicStream;
/* For all of the following functions, numChannels is multiplied by numSamples
to determine the actual number of values read or returned. */
/* Create a sonic stream. Return NULL only if we are out of memory and cannot
allocate the stream. Set numChannels to 1 for mono, and 2 for stereo. */
sonicStream sonicCreateStream(int sampleRate, int numChannels);
/* Destroy the sonic stream. */
void sonicDestroyStream(sonicStream stream);
/* Attach user data to the stream. */
void sonicSetUserData(sonicStream stream, void *userData);
/* Retrieve user data attached to the stream. */
void *sonicGetUserData(sonicStream stream);
/* Use this to write floating point data to be speed up or down into the stream.
Values must be between -1 and 1. Return 0 if memory realloc failed,
otherwise 1 */
int sonicWriteFloatToStream(sonicStream stream, const float* samples, int numSamples);
/* Use this to write 16-bit data to be speed up or down into the stream.
Return 0 if memory realloc failed, otherwise 1 */
int sonicWriteShortToStream(sonicStream stream, const short* samples, int numSamples);
/* Use this to write 8-bit unsigned data to be speed up or down into the stream.
Return 0 if memory realloc failed, otherwise 1 */
int sonicWriteUnsignedCharToStream(sonicStream stream, const unsigned char* samples,
int numSamples);
/* Use this to read floating point data out of the stream. Sometimes no data
will be available, and zero is returned, which is not an error condition. */
int sonicReadFloatFromStream(sonicStream stream, float* samples,
int maxSamples);
/* Use this to read 16-bit signed samples out of the stream. Sometimes no data
   will be available, and zero is returned, which is not an error condition. */
int sonicReadShortFromStream(sonicStream stream, short* samples,
                             int maxSamples);
/* Use this to read 8-bit unsigned samples out of the stream. Sometimes no
   data will be available, and zero is returned, which is not an error
   condition. */
int sonicReadUnsignedCharFromStream(sonicStream stream, unsigned char* samples,
                                    int maxSamples);
/* Force the sonic stream to generate output using whatever data it currently
   has. No extra delay will be added to the output, but flushing in the middle
   of words could introduce distortion. */
int sonicFlushStream(sonicStream stream);
/* Return the number of samples in the output buffer. */
int sonicSamplesAvailable(sonicStream stream);
/* Get the speed of the stream. */
float sonicGetSpeed(sonicStream stream);
/* Set the speed of the stream. */
void sonicSetSpeed(sonicStream stream, float speed);
/* Get the pitch of the stream. */
float sonicGetPitch(sonicStream stream);
/* Set the pitch of the stream. */
void sonicSetPitch(sonicStream stream, float pitch);
/* Get the rate of the stream. */
float sonicGetRate(sonicStream stream);
/* Set the rate of the stream. */
void sonicSetRate(sonicStream stream, float rate);
/* Get the scaling factor (volume) of the stream. */
float sonicGetVolume(sonicStream stream);
/* Set the scaling factor (volume) of the stream. */
void sonicSetVolume(sonicStream stream, float volume);
/* Chord pitch is DEPRECATED. AFAIK, it was never used by anyone. These
   functions still exist to avoid breaking existing code. */
/* Get the chord pitch setting. */
int sonicGetChordPitch(sonicStream stream);
/* Set chord pitch mode on or off. Default is off. See the documentation
   page for a description of this feature. */
void sonicSetChordPitch(sonicStream stream, int useChordPitch);
/* Get the quality setting. */
int sonicGetQuality(sonicStream stream);
/* Set the "quality". Default 0 is virtually as good as 1, but very much
 * faster. */
void sonicSetQuality(sonicStream stream, int quality);
/* Get the sample rate of the stream. */
int sonicGetSampleRate(sonicStream stream);
/* Set the sample rate of the stream. This will drop any samples that have not
 * been read. */
void sonicSetSampleRate(sonicStream stream, int sampleRate);
/* Get the number of channels. */
int sonicGetNumChannels(sonicStream stream);
/* Set the number of channels. This will drop any samples that have not been
 * read. */
void sonicSetNumChannels(sonicStream stream, int numChannels);
/* This is a non-stream oriented interface to just change the speed of a
   floating-point sound sample. It works in-place on the sample array, so
   there must be at least speed*numSamples available space in the array.
   Returns the new number of samples. */
int sonicChangeFloatSpeed(float* samples, int numSamples, float speed,
                          float pitch, float rate, float volume,
                          int useChordPitch, int sampleRate, int numChannels);
/* This is a non-stream oriented interface to just change the speed of a
   16-bit sound sample. It works in-place on the sample array, so there must
   be at least speed*numSamples available space in the array. Returns the new
   number of samples. */
int sonicChangeShortSpeed(short* samples, int numSamples, float speed,
                          float pitch, float rate, float volume,
                          int useChordPitch, int sampleRate, int numChannels);
#ifdef SONIC_SPECTROGRAM
/*
This code generates high quality spectrograms from sound samples, using
Time-Aliased-FFTs as described at:

    https://github.com/waywardgeek/spectrogram

Basically, two adjacent pitch periods are overlap-added to create a sound
sample that accurately represents the speech sound at that moment in time.
This set of samples is converted to a spectral line using an FFT, and the
result is saved as a single spectral line at that moment in time. The
resulting spectral lines vary in resolution (it is equal to the number of
samples in the pitch period), and the spacing of spectral lines also varies
(proportional to the number of samples in the pitch period).

To generate a bitmap, linear interpolation is used to render the grayscale
value at any particular point in time and frequency.
*/
#define SONIC_MAX_SPECTRUM_FREQ 5000
struct sonicSpectrogramStruct;
struct sonicBitmapStruct;
typedef struct sonicSpectrogramStruct* sonicSpectrogram;
typedef struct sonicBitmapStruct* sonicBitmap;
/* sonicBitmap objects represent spectrograms as grayscale bitmaps where each
   pixel is from 0 (black) to 255 (white). Bitmaps are rows*cols in size.
   Rows are indexed top to bottom and columns are indexed left to right. */
struct sonicBitmapStruct {
  unsigned char* data;
  int numRows;
  int numCols;
};
/* Enable computation of a spectrogram on the fly. */
void sonicComputeSpectrogram(sonicStream stream);
/* Get the spectrogram. */
sonicSpectrogram sonicGetSpectrogram(sonicStream stream);
/* Create an empty spectrogram. Called automatically if sonicComputeSpectrogram
   has been called. */
sonicSpectrogram sonicCreateSpectrogram(int sampleRate);
/* Destroy the spectrogram. This is called automatically when calling
   sonicDestroyStream. */
void sonicDestroySpectrogram(sonicSpectrogram spectrogram);
/* Convert the spectrogram to a bitmap. Caller must destroy bitmap when done. */
sonicBitmap sonicConvertSpectrogramToBitmap(sonicSpectrogram spectrogram,
                                            int numRows, int numCols);
/* Destroy a bitmap returned by sonicConvertSpectrogramToBitmap. */
void sonicDestroyBitmap(sonicBitmap bitmap);
/* Write a bitmap to fileName as a PGM (portable graymap) image file. */
int sonicWritePGM(sonicBitmap bitmap, char* fileName);
/* Add two pitch periods worth of samples to the spectrogram. There must be
   2*period samples. Time should advance one pitch period for each call to
   this function. */
void sonicAddPitchPeriodToSpectrogram(sonicSpectrogram spectrogram,
                                      short* samples, int numSamples,
                                      int numChannels);
#endif /* SONIC_SPECTROGRAM */
#ifdef __cplusplus
}
#endif
#endif /* SONIC_H_ */

View File

@ -951,16 +951,26 @@ open class LauncherActivity : CommonActivity() {
private val messageListener = MessageClient.OnMessageReceivedListener { messageEvent ->
when (messageEvent.path) {
"/gesture/right",
"/gesture/next" -> {
// 시계에서 오른쪽으로 휘둘렀을 때: 예) 다음 배경화면으로 변경
}
"/gesture/left",
"/gesture/prev" -> {
onSwipeRight()
// 시계에서 왼쪽으로 휘둘렀을 때: 예) 앱 서랍 열기
// showToast("시계 제스처: 앱 서랍")
}
"/gesture/down" ->{
onSwipeDown()
}
"/gesture/up"->{
val intent = Intent("ACTION_NEXT_WALLPAPER").setPackage(packageName)
sendBroadcast(intent)
showToast("시계 제스처: 다음 배경화면")
}
"/gesture/prev" -> {
// 시계에서 왼쪽으로 휘둘렀을 때: 예) 앱 서랍 열기
showAppDrawer()
showToast("시계 제스처: 앱 서랍")
// showAppDrawer()
}
}
}

View File

@ -168,6 +168,8 @@ class CompletedFilesFragment : Fragment() {
loadFiles()
} else {
if (extVideos.contains(file.extension.lowercase())) {
trackFileAccess(file.name)
loadFiles()
val intent = Intent(requireContext(), PlayerActivity::class.java).apply {
putExtra("VIDEO_PATH", file.absolutePath)
}

View File

@ -152,7 +152,9 @@ open class NeoRssActivity : CommonActivity() {
//// showContents(R.id.close)
// }
// }
when (messageEvent.path) {
"/gesture/right",
"/gesture/next" -> {
when(currentFragment) {
is RssHome ->{
@ -173,9 +175,12 @@ open class NeoRssActivity : CommonActivity() {
}
}
}
"/gesture/left",
"/gesture/prev" -> {
currentFragment.onRemoteLeft(false)
}
"/gesture/up" ->{}
"/gesture/down" ->{}
}
}

View File

@ -24,6 +24,19 @@ class NativePlayer {
fun seekBy(sec: Double) = nativeSeekBy(nativeHandle, sec)
fun setSpeed(speed: Float) = nativeSetSpeed(nativeHandle, speed)
// 외부 함수 호출 정의
fun getDuration(): Double {
return if (nativeHandle != 0L) nativeGetDuration(nativeHandle) else 0.0
}
fun seekTo(sec: Double) {
if (nativeHandle != 0L) nativeSeekTo(nativeHandle, sec)
}
// 하단에 external fun 추가
private external fun nativeGetDuration(h: Long): Double
private external fun nativeSeekTo(h: Long, s: Double)
@Suppress("unused")
private fun onSubtitleTextDecoded(text: String) {
subtitleCallback?.invoke(text)

View File

@ -93,6 +93,7 @@ class PlayerActivity : AppCompatActivity(), TextureView.SurfaceTextureListener {
onPreparedListener = {
runOnUiThread {
loadAvailableSubtitles()
startUIUpdateLoop()
if (allSubtitleTracks.size > 1) {
showSubtitleSelectionDialog()
} else {
@ -114,6 +115,11 @@ class PlayerActivity : AppCompatActivity(), TextureView.SurfaceTextureListener {
setupGestures()
}
private lateinit var seekBar: android.widget.SeekBar
private lateinit var tvTime: TextView
private var uiUpdateJob: Job? = null
private var isUserSeeking = false
private fun setupUI() {
val root = FrameLayout(this).apply { setBackgroundColor(Color.BLACK) }
@ -156,11 +162,80 @@ class PlayerActivity : AppCompatActivity(), TextureView.SurfaceTextureListener {
root.addView(gestureLayer)
root.addView(btnRotate, FrameLayout.LayoutParams(150, 150, Gravity.BOTTOM or Gravity.START).apply { setMargins(30,0,0,30) })
root.addView(btnHideVideo, FrameLayout.LayoutParams(150, 150, Gravity.BOTTOM or Gravity.END).apply { setMargins(0,0,30,30) })
val bottomControlLayout = android.widget.LinearLayout(this).apply {
orientation = android.widget.LinearLayout.HORIZONTAL
gravity = Gravity.CENTER_VERTICAL
setBackgroundColor(Color.parseColor("#99000000")) // 반투명 검은색 배경
setPadding(40, 20, 40, 20)
}
tvTime = TextView(this).apply {
setTextColor(Color.WHITE)
textSize = 16f
text = "00:00 / 00:00"
}
seekBar = android.widget.SeekBar(this).apply {
layoutParams = android.widget.LinearLayout.LayoutParams(0, -2, 1f).apply {
setMargins(30, 0, 30, 0)
}
setOnSeekBarChangeListener(object : android.widget.SeekBar.OnSeekBarChangeListener {
override fun onProgressChanged(bar: android.widget.SeekBar?, progress: Int, fromUser: Boolean) {
if (fromUser) {
val duration = nativePlayer?.getDuration() ?: 0.0
tvTime.text = "${formatTime(progress.toDouble())} / ${formatTime(duration)}"
}
}
override fun onStartTrackingTouch(bar: android.widget.SeekBar?) {
isUserSeeking = true // 사용자가 잡고 있을 때는 자동 업데이트 중지
}
override fun onStopTrackingTouch(bar: android.widget.SeekBar?) {
isUserSeeking = false
val targetSec = bar?.progress?.toDouble() ?: 0.0
nativePlayer?.seekTo(targetSec) // 원하는 위치로 이동 요청
}
})
}
bottomControlLayout.addView(tvTime)
bottomControlLayout.addView(seekBar)
root.addView(bottomControlLayout, FrameLayout.LayoutParams(-1, -2, Gravity.BOTTOM))
setContentView(root)
hideSystemUI()
}
private fun formatTime(seconds: Double): String {
val totalSec = seconds.toInt()
val h = totalSec / 3600
val m = (totalSec % 3600) / 60
val s = totalSec % 60
return if (h > 0) String.format("%02d:%02d:%02d", h, m, s) else String.format("%02d:%02d", m, s)
}
// UI 갱신 루프 시작 함수 추가
private fun startUIUpdateLoop() {
uiUpdateJob?.cancel()
uiUpdateJob = CoroutineScope(Dispatchers.Main).launch {
while (isActive) {
if (isPlaying && !isUserSeeking) {
val currentPos = nativePlayer?.getCurrentPosition() ?: 0.0
val duration = nativePlayer?.getDuration() ?: 0.0
if (duration > 0) {
seekBar.max = duration.toInt()
seekBar.progress = currentPos.toInt()
tvTime.text = "${formatTime(currentPos)} / ${formatTime(duration)}"
}
}
delay(500) // 0.5초마다 UI 갱신
}
}
}
override fun onSurfaceTextureAvailable(st: SurfaceTexture, w: Int, h: Int) {
val videoFile = File(videoPath)
if (videoFile.exists()) {
@ -232,7 +307,7 @@ class PlayerActivity : AppCompatActivity(), TextureView.SurfaceTextureListener {
val rightDetector = GestureDetector(this, object : GestureDetector.SimpleOnGestureListener() {
override fun onLongPress(e: MotionEvent) { nativePlayer?.setSpeed(4.0f) }
override fun onSingleTapUp(e: MotionEvent): Boolean {
nativePlayer?.seekBy(20.0)
nativePlayer?.seekBy(40.0)
return true
}
})
@ -329,19 +404,31 @@ class PlayerActivity : AppCompatActivity(), TextureView.SurfaceTextureListener {
private fun cleanSubtitleText(text: String): String = text.replace(Regex("\\{.*?\\}"), "")
var lastSubTitle : String = ""
// 💡 클래스 상단에 인덱스 기억용 변수 하나만 추가해 주세요.
private var currentSubtitleIndex = 0
private fun startSubtitleSyncLoop() {
subtitleSyncJob?.cancel()
// 💡 자막이 아예 없다면 작업을 시작조차 하지 않음
if (externalSubtitles.isEmpty()) {
subtitleView.visibility = View.INVISIBLE
return
}
// 💡 Dispatchers.Main 대신 Default에서 연산하고 UI만 Main에서 갱신하는 것이 버퍼링 방지에 좋습니다.
subtitleSyncJob = CoroutineScope(Dispatchers.Main).launch {
while (isActive) {
if (isPlaying) {
val currentSec = nativePlayer?.getCurrentPosition() ?: 0.0
val currentSub = externalSubtitles.find { currentSec in it.startSec..it.endSec }
// 💡 최적화된 인덱스 탐색 함수 호출
val currentSub = findSubtitleIndexed(currentSec)
if (currentSub != null) {
// 💡 번역본이 존재하면 [번역본] + [줄바꿈] + [원본] 형태로 보여주거나, 번역본만 보여줍니다.
val displayText = if (currentSub.translatedText != null) {
"${currentSub.translatedText}\n${cleanSubtitleText(currentSub.text)}"
// (원본이 보기 싫다면 그냥 currentSub.translatedText 만 넣으셔도 됩니다!)
} else {
cleanSubtitleText(currentSub.text)
}
@ -355,11 +442,36 @@ class PlayerActivity : AppCompatActivity(), TextureView.SurfaceTextureListener {
subtitleView.visibility = View.INVISIBLE
}
}
delay(100)
delay(200)
}
}
}
/**
* 💡 인덱스 기반 탐색: 이전 위치부터 찾기 때문에 CPU 부하가 거의 없습니다.
*/
private fun findSubtitleIndexed(currentSec: Double): SubtitleBlock? {
// 1. 영상이 뒤로 감기 되었을 경우 인덱스 초기화
if (currentSubtitleIndex >= externalSubtitles.size ||
externalSubtitles[currentSubtitleIndex].startSec > currentSec) {
currentSubtitleIndex = 0
}
// 2. 마지막으로 찾았던 위치(currentSubtitleIndex)부터 탐색 시작
for (i in currentSubtitleIndex until externalSubtitles.size) {
val item = externalSubtitles[i]
if (currentSec in item.startSec..item.endSec) {
currentSubtitleIndex = i // 현재 위치 저장
return item
}
// 3. 자막이 시간순으로 정렬되어 있다면, 현재 시간보다 시작 시간이 커지는 순간 루프 종료
if (item.startSec > currentSec) break
}
return null
}
private fun readTextWithEncoding(file: File): String {
val bytes = file.readBytes()
@ -450,6 +562,7 @@ class PlayerActivity : AppCompatActivity(), TextureView.SurfaceTextureListener {
} catch (e: Exception) {
Log.e("PlayerActivity", "Subtitle parsing error", e)
}
result.sortBy { it.startSec }
return result
}
@ -561,5 +674,6 @@ class PlayerActivity : AppCompatActivity(), TextureView.SurfaceTextureListener {
nativePlayer?.destroy()
leftLongPressJob?.cancel()
subtitleSyncJob?.cancel()
uiUpdateJob?.cancel()
}
}

View File

@ -4,6 +4,7 @@ import android.app.PendingIntent
import android.content.BroadcastReceiver
import android.content.Context
import android.content.Intent
import android.util.Log
import androidx.wear.protolayout.ActionBuilders
import androidx.wear.protolayout.ColorBuilders.argb
import androidx.wear.protolayout.DimensionBuilders.dp
@ -38,7 +39,39 @@ class MainTileService : SuspendingTileService() {
override suspend fun tileRequest(
requestParams: RequestBuilders.TileRequest
) = tile(requestParams, this)
): TileBuilders.Tile {
// 💡 사용자가 방금 누른 버튼의 ID(path)를 가져옵니다.
val clickedPath = requestParams.currentState.lastClickableId
// 눌린 버튼이 있다면 폰(런처)으로 메시지 전송!
if (clickedPath.isNotEmpty()) {
Log.d("MainTileService", "Clicked path: $clickedPath")
Wearable.getNodeClient(this).connectedNodes.addOnSuccessListener { nodes ->
for (node in nodes) {
Wearable.getMessageClient(this).sendMessage(node.id, clickedPath, null)
}
}
}
// 타일 UI 렌더링
val singleTileTimeline = TimelineBuilders.Timeline.Builder()
.addTimelineEntry(
TimelineBuilders.TimelineEntry.Builder()
.setLayout(
LayoutElementBuilders.Layout.Builder()
.setRoot(tileLayout(requestParams, this))
.build()
)
.build()
)
.build()
return TileBuilders.Tile.Builder()
.setResourcesVersion(RESOURCES_VERSION)
.setTileTimeline(singleTileTimeline)
.build()
}
}
private fun resources(
@ -73,57 +106,56 @@ private fun tile(
private fun tileLayout(requestParams: RequestBuilders.TileRequest, context: Context): LayoutElementBuilders.LayoutElement {
fun createNavButton(label: String, path: String,x : Float, y :Float): LayoutElementBuilders.LayoutElement {
// 리시버를 실행하기 위한 PendingIntent 생성
val intent = Intent(context, TileActionReceiver::class.java).apply {
putExtra("path", path)
}
val pendingIntent = PendingIntent.getBroadcast(
context, path.hashCode(), intent,
PendingIntent.FLAG_UPDATE_CURRENT or PendingIntent.FLAG_IMMUTABLE
)
// 버튼 생성 함수 (크기를 48dp로 살짝 줄여서 화면에 쏙 들어가게 맞춤)
fun createNavButton(label: String, path: String): LayoutElementBuilders.LayoutElement {
return LayoutElementBuilders.Box.Builder()
.setModifiers(ModifiersBuilders.Modifiers.Builder()
.setClickable(ModifiersBuilders.Clickable.Builder()
.setOnClick(ActionBuilders.LaunchAction.Builder()
.setAndroidActivity(ActionBuilders.AndroidActivity.Builder()
.setPackageName(context.packageName)
.setClassName(TileActionReceiver::class.java.name) // 리시버 호출
.build())
.build())
.build())
.setTransformation(ModifiersBuilders.Transformation.Builder()
// 방향에 따라 x, y 좌표 조절 (상: 0,-45 / 하: 0,45 / 좌: -45,0 / 우: 45,0)
.setTranslationX(dp(x))
.setTranslationY(dp(y))
.setId(path)
.setOnClick(ActionBuilders.LoadAction.Builder().build())
.build())
.setBackground(ModifiersBuilders.Background.Builder()
.setColor(argb(0xFF303030.toInt()))
.setCorner(ModifiersBuilders.Corner.Builder().setRadius(dp(15f)).build())
.setCorner(ModifiersBuilders.Corner.Builder().setRadius(dp(24f)).build()) // 원형에 가깝게
.build())
.build())
.setWidth(dp(56f)).setHeight(dp(56f))
.addContent(Text.Builder(context, label).build())
.setWidth(dp(48f)).setHeight(dp(48f))
.addContent(
Text.Builder(context, label)
.setTypography(Typography.TYPOGRAPHY_CAPTION2) // 글자 크기 최적화
.setColor(argb(0xFFFFFFFF.toInt()))
.build()
)
.build()
}
return PrimaryLayout.Builder(requestParams.deviceConfiguration)
.setContent(
LayoutElementBuilders.Box.Builder()
// 상 (UP): X는 그대로, Y만 위로(-45dp)
.addContent(createNavButton("UP", "/gesture/up", 0f, -50f))
// 1층: 제일 위에 UP 버튼 하나만
val topRow = LayoutElementBuilders.Row.Builder()
.addContent(createNavButton("UP", "/gesture/up"))
.build()
// 하 (DOWN): X는 그대로, Y만 아래로(+50dp)
.addContent(createNavButton("DOWN", "/gesture/down", 0f, 50f))
// 2층: 중간에 LEFT, 빈 공간, RIGHT 배치
val middleRow = LayoutElementBuilders.Row.Builder()
.addContent(createNavButton("LEFT", "/gesture/left"))
// 좌우 버튼 사이의 널찍한 빈 공간 (UP/DOWN 버튼이 들어갈 간격)
.addContent(LayoutElementBuilders.Spacer.Builder().setWidth(dp(56f)).build())
.addContent(createNavButton("RIGHT", "/gesture/right"))
.build()
// 좌 (LEFT): X를 왼쪽으로(-50dp), Y는 그대로
.addContent(createNavButton("LEFT", "/gesture/left", -50f, 0f))
// 3층: 제일 아래에 DOWN 버튼 하나만
val bottomRow = LayoutElementBuilders.Row.Builder()
.addContent(createNavButton("DOWN", "/gesture/down"))
.build()
// 우 (RIGHT): X를 오른쪽으로(+50dp), Y는 그대로
.addContent(createNavButton("RIGHT", "/gesture/right", 50f, 0f))
.build()
).build()
// 전체를 세로로 정렬하여 합체
return LayoutElementBuilders.Column.Builder()
.setHorizontalAlignment(LayoutElementBuilders.HORIZONTAL_ALIGN_CENTER) // 가운데 정렬 필수
.addContent(topRow)
.addContent(LayoutElementBuilders.Spacer.Builder().setHeight(dp(8f)).build()) // 1~2층 간격
.addContent(middleRow)
.addContent(LayoutElementBuilders.Spacer.Builder().setHeight(dp(8f)).build()) // 2~3층 간격
.addContent(bottomRow)
.build()
}