diff --git a/app/src/main/AndroidManifest.xml b/app/src/main/AndroidManifest.xml
index a6d8490d..29a18ed2 100644
--- a/app/src/main/AndroidManifest.xml
+++ b/app/src/main/AndroidManifest.xml
@@ -93,7 +93,7 @@
-
+
+#include <queue>
+#include <mutex>
+#include <condition_variable>
+
+extern "C" {
+#include <libavcodec/avcodec.h>
+}
+
+class PacketQueue {
+private:
+ std::queue<AVPacket*> queue_;
+ std::mutex mutex_;
+ std::condition_variable cond_;
+ int sizeBytes_ = 0;
+ bool abortRequest_ = false; // 플레이어 종료 시 대기 중인 스레드를 깨우기 위함
+
+public:
+ void abort() {
+ std::lock_guard lock(mutex_);
+ abortRequest_ = true;
+ cond_.notify_all();
+ }
+
+ void start() {
+ std::lock_guard lock(mutex_);
+ abortRequest_ = false;
+ }
+
+ bool push(AVPacket* pkt) {
+ std::lock_guard lock(mutex_);
+ if (abortRequest_) return false;
+
+ // 패킷 복사 (메모리 소유권을 큐로 넘김)
+ AVPacket* pkt1 = av_packet_alloc();
+ av_packet_move_ref(pkt1, pkt);
+
+ queue_.push(pkt1);
+ sizeBytes_ += pkt1->size;
+ cond_.notify_one(); // 데이터를 기다리며 자고 있는 디코딩 스레드를 깨움
+ return true;
+ }
+
+ // 데이터를 꺼낼 때 큐가 비어있으면 block(대기) 상태가 됨
+ AVPacket* pop() {
+ std::unique_lock lock(mutex_);
+ cond_.wait(lock, [this]() { return !queue_.empty() || abortRequest_; });
+
+ if (abortRequest_ || queue_.empty()) return nullptr;
+
+ AVPacket* pkt = queue_.front();
+ queue_.pop();
+ sizeBytes_ -= pkt->size;
+ return pkt;
+ }
+
+ void flush() {
+ std::lock_guard lock(mutex_);
+ while (!queue_.empty()) {
+ AVPacket* pkt = queue_.front();
+ queue_.pop();
+ av_packet_free(&pkt);
+ }
+ sizeBytes_ = 0;
+ }
+
+ int getSizeBytes() {
+ std::lock_guard lock(mutex_);
+ return sizeBytes_;
+ }
+};
\ No newline at end of file
diff --git a/app/src/main/cpp/PlayerEngine.cpp b/app/src/main/cpp/PlayerEngine.cpp
index aa92defa..74534055 100644
--- a/app/src/main/cpp/PlayerEngine.cpp
+++ b/app/src/main/cpp/PlayerEngine.cpp
@@ -10,6 +10,8 @@
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__)
+const int MIN_BUFFER_SIZE = 2 * 1024 * 1024;
+
static int custom_read_packet(void *opaque, uint8_t *buf, int buf_size) {
int fd = (int)(intptr_t)opaque;
int ret = read(fd, buf, buf_size);
@@ -143,6 +145,10 @@ void PlayerEngine::prepareInternal() {
// 💡 1. 버퍼 크기를 가용 가능한 최대치로 늘려서 비디오 렌더링 딜레이에 대비합니다.
AAudioStream_setBufferSizeInFrames(audio_stream_, AAudioStream_getBufferCapacityInFrames(audio_stream_));
+ // 🚀 Sonic 초기화 추가 (Sample Rate 48000, Channels 2)
+ sonic_stream_ = sonicCreateStream(48000, 2);
+ sonicSetSpeed(sonic_stream_, 1.0f); // 초기 배속 1.0
+
// 💡 2. 여기서 호출하던 AAudioStream_requestStart(audio_stream_); 를 삭제합니다! (대기 상태로 둠)
AAudioStreamBuilder_delete(builder);
@@ -156,23 +162,26 @@ void PlayerEngine::prepareInternal() {
void PlayerEngine::play(ANativeWindow* window) {
if (!isPrepared_) return;
- if (isPlaying_) {
- isPaused_ = false;
- // 💡 일시정지가 풀릴 때 오디오도 다시 시작
- if (audio_stream_) AAudioStream_requestStart(audio_stream_);
- return;
- }
+ if (isPlaying_) return;
+
window_ = window;
ANativeWindow_acquire(window_);
isPlaying_ = true;
- isPaused_ = false;
+ abortRequest_ = false;
+
+ videoQueue_.start();
+ audioQueue_.start();
- // 💡 처음 재생을 시작할 때 비로소 오디오 엔진을 가동합니다.
if (audio_stream_) AAudioStream_requestStart(audio_stream_);
- renderThread_ = std::thread(&PlayerEngine::renderLoop, this);
+ // 🚀 3개의 스레드 동시 가동
+ readThread_ = std::thread(&PlayerEngine::readThreadLoop, this);
+ videoThread_ = std::thread(&PlayerEngine::videoThreadLoop, this);
+ audioThread_ = std::thread(&PlayerEngine::audioThreadLoop, this);
}
+
+
void PlayerEngine::pause() {
isPaused_ = true;
// 💡 영상이 일시정지되면 오디오 버퍼도 소모되지 않게 멈춰줍니다.
@@ -182,7 +191,22 @@ void PlayerEngine::pause() {
void PlayerEngine::stop() {
if (!isPlaying_) return;
isPlaying_ = false;
- if (renderThread_.joinable()) renderThread_.join();
+ abortRequest_ = true;
+
+ // 큐를 깨워서 멈춰있는 스레드들을 탈출시킴
+ videoQueue_.abort();
+ audioQueue_.abort();
+
+ if (readThread_.joinable()) readThread_.join();
+ if (videoThread_.joinable()) videoThread_.join();
+ if (audioThread_.joinable()) audioThread_.join();
+
+ videoQueue_.flush();
+ audioQueue_.flush();
+ if (sonic_stream_) {
+ sonicDestroyStream(sonic_stream_);
+ sonic_stream_ = nullptr;
+ }
if (window_) { ANativeWindow_release(window_); window_ = nullptr; }
}
@@ -191,7 +215,21 @@ void PlayerEngine::seekBy(double seconds) {
seekReq_ = true;
}
-void PlayerEngine::setSpeed(float speed) { playbackSpeed_ = speed > 0.0f ? speed : 1.0f; }
+void PlayerEngine::setSpeed(float speed) {
+ float oldSpeed = playbackSpeed_.load();
+ playbackSpeed_ = speed > 0.0f ? speed : 1.0f;
+
+
+ // 💡 배속이 종료(1.0으로 복귀)될 때만 특별 처리를 위해 플래그 설정
+ if (oldSpeed != 1.0f && speed == 1.0f) {
+ seekReq_ = true; // 강제 Flush를 위해 Seek 로직을 재활용합니다.
+ isAbsoluteSeek_ = true;
+ seekTargetSec_ = currentPosSec_; // 현재 비디오 위치로 오디오를 강제 고정
+ }
+ if (sonic_stream_) {
+ sonicSetSpeed(sonic_stream_, playbackSpeed_.load());
+ }
+}
void PlayerEngine::sendSubtitleToKotlin(const char* text) {
if (!jvm_ || !listenerObj_ || !subtitleMethodId_ || !text) return;
@@ -203,98 +241,382 @@ void PlayerEngine::sendSubtitleToKotlin(const char* text) {
if (attached) jvm_->DetachCurrentThread();
}
-void PlayerEngine::renderLoop() {
- LOGI("Player render loop started (AAudio Mode)");
- AVFrame* frame = av_frame_alloc();
- AVPacket* pkt = av_packet_alloc();
- int last_win_w = 0, last_win_h = 0;
+double PlayerEngine::getDuration() const {
+ if (fmt_ctx_ && fmt_ctx_->duration != AV_NOPTS_VALUE) {
+ return (double)fmt_ctx_->duration / AV_TIME_BASE;
+ }
+ return 0.0;
+}
- while (isPlaying_) {
- if (isPaused_) {
- std::this_thread::sleep_for(std::chrono::milliseconds(20));
+void PlayerEngine::seekTo(double seconds) {
+ seekTargetSec_ = seconds;
+ isAbsoluteSeek_ = true;
+ seekReq_ = true;
+}
+
+//void PlayerEngine::renderLoop() {
+// LOGI("🚀 Player render loop started (AV Sync & Performance Optimized)");
+//
+// AVFrame* frame = av_frame_alloc();
+// AVPacket* pkt = av_packet_alloc();
+// int last_win_w = 0, last_win_h = 0;
+//
+// // 1. 영상의 실제 FPS 정보를 기반으로 프레임당 지연 시간(마이크로초) 계산
+// double fps = av_q2d(fmt_ctx_->streams[video_stream_idx_]->avg_frame_rate);
+// if (fps <= 0) fps = 23.976; // 정보가 없을 경우 영화 표준 프레임 레이트 적용
+// int64_t frame_delay_us = (int64_t)(1000000.0 / fps);
+//
+// while (isPlaying_) {
+// // 일시정지 처리
+// if (isPaused_) {
+// std::this_thread::sleep_for(std::chrono::milliseconds(20));
+// continue;
+// }
+//
+// // Seek(탐색) 요청 처리
+// if (seekReq_) {
+// int64_t seek_target;
+// if (isAbsoluteSeek_) {
+// seek_target = (int64_t)(seekTargetSec_ * AV_TIME_BASE);
+// } else {
+// seek_target = (int64_t)((currentPosSec_ + seekTargetOffset_) * AV_TIME_BASE);
+// }
+//
+// av_seek_frame(fmt_ctx_, -1, seek_target, AVSEEK_FLAG_BACKWARD);
+//
+// if (video_codec_ctx_) avcodec_flush_buffers(video_codec_ctx_);
+// if (audio_codec_ctx_) avcodec_flush_buffers(audio_codec_ctx_);
+//
+// if (audio_stream_) {
+// AAudioStream_requestPause(audio_stream_);
+// AAudioStream_requestFlush(audio_stream_);
+// AAudioStream_requestStart(audio_stream_);
+// }
+// seekReq_ = false;
+// isAbsoluteSeek_ = false; // 플래그 초기화
+// }
+//
+// float currentSpeed = playbackSpeed_.load();
+//
+// // 패킷 읽기
+// if (av_read_frame(fmt_ctx_, pkt) < 0) break;
+//
+// // --- 비디오 스트림 처리 ---
+// if (pkt->stream_index == video_stream_idx_) {
+// // 💡 프레임 렌더링 시작 시간 기록
+// auto frame_start = std::chrono::high_resolution_clock::now();
+//
+// avcodec_send_packet(video_codec_ctx_, pkt);
+// while (avcodec_receive_frame(video_codec_ctx_, frame) == 0) {
+// // 현재 재생 시간(초) 업데이트
+// currentPosSec_ = frame->pts * av_q2d(fmt_ctx_->streams[video_stream_idx_]->time_base);
+//
+// if (window_) {
+// int w = ANativeWindow_getWidth(window_), h = ANativeWindow_getHeight(window_);
+// if (!sws_ctx_ || w != last_win_w || h != last_win_h) {
+// if (sws_ctx_) sws_freeContext(sws_ctx_);
+// sws_ctx_ = sws_getContext(frame->width, frame->height, video_codec_ctx_->pix_fmt, w, h, AV_PIX_FMT_RGBA, SWS_BILINEAR, nullptr, nullptr, nullptr);
+// last_win_w = w; last_win_h = h;
+// }
+//
+// ANativeWindow_Buffer buffer;
+// if (ANativeWindow_lock(window_, &buffer, nullptr) == 0) {
+// uint8_t* dst_data[4] = { (uint8_t*)buffer.bits, nullptr, nullptr, nullptr };
+// int dst_line[4] = { buffer.stride * 4, 0, 0, 0 };
+// if (sws_ctx_) sws_scale(sws_ctx_, frame->data, frame->linesize, 0, frame->height, dst_data, dst_line);
+// ANativeWindow_unlockAndPost(window_);
+// }
+// }
+//
+// // 💡 [핵심 동기화] 연산에 소요된 시간을 제외한 나머지 시간만 대기
+// auto frame_end = std::chrono::high_resolution_clock::now();
+// int64_t actual_render_time = std::chrono::duration_cast(frame_end - frame_start).count();
+//
+// // 배속(currentSpeed)을 고려한 대기 시간 계산
+// int64_t wait_time = (int64_t)((frame_delay_us / currentSpeed) - actual_render_time);
+//
+// if (wait_time > 0) {
+// std::this_thread::sleep_for(std::chrono::microseconds(wait_time));
+// }
+// }
+// }
+// // --- 오디오 스트림 처리 ---
+// else if (pkt->stream_index == audio_stream_idx_) {
+// // 정속 재생(1.0)일 때만 오디오 재생 (오디오 배속은 로직이 복잡하므로 정속 위주 최적화)
+// if (currentSpeed == 1.0f) {
+// avcodec_send_packet(audio_codec_ctx_, pkt);
+// while (avcodec_receive_frame(audio_codec_ctx_, frame) == 0) {
+// if (swr_ctx_ && audio_stream_) {
+// int out_samples = swr_get_out_samples(swr_ctx_, frame->nb_samples);
+// uint8_t* out_buf = (uint8_t*)malloc(out_samples * 4);
+// int converted_samples = swr_convert(swr_ctx_, &out_buf, out_samples, (const uint8_t**)frame->data, frame->nb_samples);
+//
+// if (converted_samples > 0) {
+// // 💡 [수정] 타임아웃을 0으로 설정하여 오디오 버퍼가 꽉 차도 비디오 스레드가 멈추지 않게 함
+// AAudioStream_write(audio_stream_, out_buf, converted_samples, 0);
+// }
+// free(out_buf);
+// }
+// }
+// }
+// }
+// // --- 자막 스트림 처리 ---
+// else if (pkt->stream_index == current_sub_stream_idx_) {
+// AVSubtitle sub; int got_sub = 0;
+// avcodec_decode_subtitle2(sub_codec_ctx_, &sub, &got_sub, pkt);
+// if (got_sub) {
+// for (unsigned int i = 0; i < sub.num_rects; i++) {
+// if (sub.rects[i]->type == SUBTITLE_TEXT && sub.rects[i]->text) sendSubtitleToKotlin(sub.rects[i]->text);
+// else if (sub.rects[i]->type == SUBTITLE_ASS && sub.rects[i]->ass) sendSubtitleToKotlin(sub.rects[i]->ass);
+// }
+// avsubtitle_free(&sub);
+// }
+// }
+// av_packet_unref(pkt);
+// }
+//
+// // 자원 해제 로직
+// if (audio_stream_) {
+// AAudioStream_requestStop(audio_stream_);
+// AAudioStream_close(audio_stream_);
+// audio_stream_ = nullptr;
+// }
+// av_frame_free(&frame);
+// av_packet_free(&pkt);
+// if (swr_ctx_) swr_free(&swr_ctx_);
+// if (sws_ctx_) sws_freeContext(sws_ctx_);
+// if (video_codec_ctx_) avcodec_free_context(&video_codec_ctx_);
+// if (audio_codec_ctx_) avcodec_free_context(&audio_codec_ctx_);
+// if (fmt_ctx_) avformat_close_input(&fmt_ctx_);
+//
+// LOGI("🏁 Player loop finished gracefully.");
+//}
+
+
+void PlayerEngine::readThreadLoop() {
+ AVPacket* pkt = av_packet_alloc();
+ const int MAX_QUEUE_SIZE = 15 * 1024 * 1024;
+
+ while (!abortRequest_) {
+
+ // 🚀 [탐색(Seek) 처리 로직]
+ if (seekReq_) {
+ // 1. 목표 시간 계산
+ double targetSec = isAbsoluteSeek_ ? seekTargetSec_.load() : (currentPosSec_ + seekTargetOffset_.load());
+ int64_t seek_target = (int64_t)(targetSec * AV_TIME_BASE);
+
+ // 2. FFmpeg 탐색 수행
+ av_seek_frame(fmt_ctx_, -1, seek_target, AVSEEK_FLAG_BACKWARD);
+
+ // 3. 💡 [핵심] 기존에 큐에 쌓여있던 옛날 패킷들을 전부 폭파!
+ videoQueue_.flush();
+ audioQueue_.flush();
+
+ // 4. 코덱 내부에 남아있는 옛날 프레임 찌꺼기 초기화
+// if (video_codec_ctx_) avcodec_flush_buffers(video_codec_ctx_);
+// if (audio_codec_ctx_) avcodec_flush_buffers(audio_codec_ctx_);
+// if (sonic_stream_) {
+// sonicFlushStream(sonic_stream_);
+// }
+//
+// // 5. 오디오 장치 초기화 (틱 잡음 방지)
+// if (audio_stream_) {
+// AAudioStream_requestPause(audio_stream_);
+// AAudioStream_requestFlush(audio_stream_);
+// AAudioStream_requestStart(audio_stream_);
+// }
+
+ videoCodecFlushReq_ = true;
+ audioCodecFlushReq_ = true;
+
+ // 6. 💡 [매우 중요] 마스터 시계 강제 업데이트
+ // 이거 안 하면 비디오 스레드가 옛날 오디오 시간과 비교하느라 화면이 굳어버립니다.
+ audioClock_ = targetSec;
+ currentPosSec_ = targetSec;
+
+ seekReq_ = false;
+ isAbsoluteSeek_ = false;
+
+ // 초기화가 끝났으니 다시 패킷을 처음부터 읽으러 올라갑니다.
+ continue;
+ }
+ // 일시정지 상태거나 큐가 너무 꽉 찼다면 잠시 대기
+ if (isPaused_ || (videoQueue_.getSizeBytes() + audioQueue_.getSizeBytes() > MAX_QUEUE_SIZE)) {
+ std::this_thread::sleep_for(std::chrono::milliseconds(10));
continue;
}
- if (seekReq_) {
- av_seek_frame(fmt_ctx_, -1, (currentPosSec_ + seekTargetOffset_) * AV_TIME_BASE, AVSEEK_FLAG_BACKWARD);
- if (video_codec_ctx_) avcodec_flush_buffers(video_codec_ctx_);
- if (audio_codec_ctx_) avcodec_flush_buffers(audio_codec_ctx_);
+ if (av_read_frame(fmt_ctx_, pkt) < 0) {
+ // EOF (파일 끝) 도달 시 대기
+ std::this_thread::sleep_for(std::chrono::milliseconds(10));
+ continue;
+ }
+ if (pkt->stream_index == video_stream_idx_) {
+ videoQueue_.push(pkt);
+ } else if (pkt->stream_index == audio_stream_idx_) {
+ audioQueue_.push(pkt);
+ }
+ av_packet_unref(pkt); // 큐 안에서 복사했으므로 원본은 비움
+ }
+ av_packet_free(&pkt);
+}
+
+void PlayerEngine::audioThreadLoop() {
+ AVFrame* frame = av_frame_alloc();
+ const int MIN_BUFFER_SIZE = 2 * 1024 * 1024;
+ bool isBuffering = true;
+ const int MAX_SONIC_SAMPLES = 8192;
+ short* sonic_out_buf = new short[MAX_SONIC_SAMPLES * 2];
+
+ while (!abortRequest_) {
+ if (isPaused_) {
+ std::this_thread::sleep_for(std::chrono::milliseconds(10));
+ continue;
+ }
+
+ int currentAudioSize = audioQueue_.getSizeBytes();
+ int currentVideoSize = videoQueue_.getSizeBytes();
+ int totalQueueSize = currentAudioSize + currentVideoSize;
+
+ // 💡 배속 중이든 아니든 이제 무조건 정상적인 버퍼링 로직을 탑니다.
+ if (totalQueueSize < MIN_BUFFER_SIZE / 2) isBuffering = true;
+ else if (totalQueueSize >= MIN_BUFFER_SIZE) isBuffering = false;
+
+ if (isBuffering && !abortRequest_) {
+ std::this_thread::sleep_for(std::chrono::milliseconds(10));
+ continue;
+ }
+
+ AVPacket* pkt = audioQueue_.pop();
+ if (!pkt) break;
+
+ // 💡 [추가] 오디오도 스스로 코덱과 Sonic 버퍼를 비웁니다.
+ if (audioCodecFlushReq_) {
+ avcodec_flush_buffers(audio_codec_ctx_);
+
+ if (sonic_stream_) {
+ sonicFlushStream(sonic_stream_);
+ }
+
+ // AAudio의 틱 잡음을 막기 위해 재생 중인 버퍼도 비워줍니다.
if (audio_stream_) {
- // 💡 안전한 버퍼 초기화를 위해 정지 -> 비움 -> 재시작 순서로 호출
AAudioStream_requestPause(audio_stream_);
AAudioStream_requestFlush(audio_stream_);
AAudioStream_requestStart(audio_stream_);
}
- seekReq_ = false;
+ audioCodecFlushReq_ = false;
}
- float currentSpeed = playbackSpeed_.load();
- if (av_read_frame(fmt_ctx_, pkt) < 0) break;
+ avcodec_send_packet(audio_codec_ctx_, pkt);
+ while (avcodec_receive_frame(audio_codec_ctx_, frame) == 0) {
- if (pkt->stream_index == video_stream_idx_) {
- avcodec_send_packet(video_codec_ctx_, pkt);
- while (avcodec_receive_frame(video_codec_ctx_, frame) == 0) {
- currentPosSec_ = frame->pts * av_q2d(fmt_ctx_->streams[video_stream_idx_]->time_base);
- if (window_) {
- int w = ANativeWindow_getWidth(window_), h = ANativeWindow_getHeight(window_);
- if (!sws_ctx_ || w != last_win_w || h != last_win_h) {
- if (sws_ctx_) sws_freeContext(sws_ctx_);
- sws_ctx_ = sws_getContext(frame->width, frame->height, video_codec_ctx_->pix_fmt, w, h, AV_PIX_FMT_RGBA, SWS_BILINEAR, nullptr, nullptr, nullptr);
- last_win_w = w; last_win_h = h;
- }
- ANativeWindow_Buffer buffer;
- if (ANativeWindow_lock(window_, &buffer, nullptr) == 0) {
- uint8_t* dst_data[4] = { (uint8_t*)buffer.bits, nullptr, nullptr, nullptr };
- int dst_line[4] = { buffer.stride * 4, 0, 0, 0 };
- if (sws_ctx_) sws_scale(sws_ctx_, frame->data, frame->linesize, 0, frame->height, dst_data, dst_line);
- ANativeWindow_unlockAndPost(window_);
- }
- }
+ // 마스터 시계 갱신
+ if (frame->pts != AV_NOPTS_VALUE) {
+ audioClock_ = frame->pts * av_q2d(fmt_ctx_->streams[audio_stream_idx_]->time_base);
}
- if (currentSpeed != 1.0f || audio_stream_idx_ < 0) {
- std::this_thread::sleep_for(std::chrono::milliseconds((int)(16/currentSpeed)));
- }
- }
- else if (pkt->stream_index == audio_stream_idx_) {
- if (currentSpeed == 1.0f) {
- avcodec_send_packet(audio_codec_ctx_, pkt);
- while (avcodec_receive_frame(audio_codec_ctx_, frame) == 0) {
- if (swr_ctx_ && audio_stream_) {
- int out_samples = swr_get_out_samples(swr_ctx_, frame->nb_samples);
- uint8_t* out_buf = (uint8_t*)malloc(out_samples * 4);
- int converted_samples = swr_convert(swr_ctx_, &out_buf, out_samples, (const uint8_t**)frame->data, frame->nb_samples);
- if (converted_samples > 0) {
- AAudioStream_write(audio_stream_, out_buf, converted_samples, 1000000000);
+
+ if (swr_ctx_ && audio_stream_) {
+ int out_samples = swr_get_out_samples(swr_ctx_, frame->nb_samples);
+ uint8_t* out_buf = (uint8_t*)malloc(out_samples * 4); // S16 2채널 = 4바이트
+
+ // 1. FFmpeg으로 PCM 변환 (정배속 데이터)
+ int converted = swr_convert(swr_ctx_, &out_buf, out_samples, (const uint8_t**)frame->data, frame->nb_samples);
+
+ if (converted > 0 && sonic_stream_ && !abortRequest_) {
+
+ // 🚀 2. 변환된 데이터를 Sonic 스트림에 밀어넣습니다 (입력)
+ sonicWriteShortToStream(sonic_stream_, (short*)out_buf, converted);
+
+ // 🚀 3. Sonic 스트림에서 배속 처리 완료된 데이터를 뽑아냅니다 (출력)
+ int samplesRead;
+ do {
+ // 가공된 데이터 꺼내기 (반환값: 꺼낸 샘플 수)
+ samplesRead = sonicReadShortFromStream(sonic_stream_, sonic_out_buf, MAX_SONIC_SAMPLES);
+
+ if (samplesRead > 0 && !abortRequest_) {
+ // 4. 가공된 최종 데이터를 사운드 카드(AAudio)에 씁니다!
+ int32_t framesLeft = samplesRead;
+ short* currentAAudioBuf = sonic_out_buf;
+
+ while (framesLeft > 0 && !abortRequest_) {
+ int32_t framesWritten = AAudioStream_write(audio_stream_, currentAAudioBuf, framesLeft, 1000000000);
+ if (framesWritten < 0) break;
+ framesLeft -= framesWritten;
+ currentAAudioBuf += framesWritten * 2; // 2채널 전진
+ }
}
- free(out_buf);
- }
+ } while (samplesRead > 0); // Sonic에 꺼낼 데이터가 없을 때까지 반복
}
+ free(out_buf);
}
}
- else if (pkt->stream_index == current_sub_stream_idx_) {
- AVSubtitle sub; int got_sub = 0;
- avcodec_decode_subtitle2(sub_codec_ctx_, &sub, &got_sub, pkt);
- if (got_sub) {
- for (unsigned int i = 0; i < sub.num_rects; i++) {
- if (sub.rects[i]->type == SUBTITLE_TEXT && sub.rects[i]->text) sendSubtitleToKotlin(sub.rects[i]->text);
- else if (sub.rects[i]->type == SUBTITLE_ASS && sub.rects[i]->ass) sendSubtitleToKotlin(sub.rects[i]->ass);
- }
- avsubtitle_free(&sub);
- }
- }
- av_packet_unref(pkt);
+ av_packet_free(&pkt);
+ }
+ delete[] sonic_out_buf;
+ av_frame_free(&frame);
+}
+
+void PlayerEngine::videoThreadLoop() {
+ AVFrame* frame = av_frame_alloc();
+ int last_win_w = 0, last_win_h = 0;
+
+ while (!abortRequest_) {
+ if (isPaused_) {
+ std::this_thread::sleep_for(std::chrono::milliseconds(10));
+ continue;
+ }
+
+ AVPacket* pkt = videoQueue_.pop();
+ if (!pkt) break;
+ // 💡 [추가] 패킷을 밀어 넣기 직전에 청소 명령이 있었는지 확인!
+ if (videoCodecFlushReq_) {
+ avcodec_flush_buffers(video_codec_ctx_);
+ videoCodecFlushReq_ = false;
+ }
+
+ avcodec_send_packet(video_codec_ctx_, pkt);
+ while (avcodec_receive_frame(video_codec_ctx_, frame) == 0) {
+
+ double videoPtsSec = frame->pts * av_q2d(fmt_ctx_->streams[video_stream_idx_]->time_base);
+ currentPosSec_ = videoPtsSec; // UI 바 업데이트용
+
+ // 💡 위대한 귀환: 비디오는 무조건 오디오 시계만 쳐다봅니다!
+ double delaySec = videoPtsSec - audioClock_.load();
+ float currentSpeed = playbackSpeed_.load();
+
+ // 🚀 [A/V Sync & 프레임 드랍 로직]
+ if (delaySec > 0.01) {
+ // 비디오가 미래다 -> 남은 시간만큼 재운다
+ double waitTime = delaySec / currentSpeed;
+ std::this_thread::sleep_for(std::chrono::microseconds((int64_t)(waitTime * 1000000.0)));
+ }
+ else if (delaySec < -0.05) {
+ // 비디오가 과거다 -> 그리지 않고 가차없이 버린다 (이것 덕분에 4배속 화면 렌더링 부하가 해결됨!)
+ continue;
+ }
+
+ // --- 렌더링 통과 ---
+ if (window_) {
+ int w = ANativeWindow_getWidth(window_), h = ANativeWindow_getHeight(window_);
+ if (!sws_ctx_ || w != last_win_w || h != last_win_h) {
+ if (sws_ctx_) sws_freeContext(sws_ctx_);
+ sws_ctx_ = sws_getContext(frame->width, frame->height, video_codec_ctx_->pix_fmt, w, h, AV_PIX_FMT_RGBA, SWS_BILINEAR, nullptr, nullptr, nullptr);
+ last_win_w = w; last_win_h = h;
+ }
+
+ ANativeWindow_Buffer buffer;
+ if (ANativeWindow_lock(window_, &buffer, nullptr) == 0) {
+ uint8_t* dst_data[4] = { (uint8_t*)buffer.bits, nullptr, nullptr, nullptr };
+ int dst_line[4] = { buffer.stride * 4, 0, 0, 0 };
+ if (sws_ctx_) sws_scale(sws_ctx_, frame->data, frame->linesize, 0, frame->height, dst_data, dst_line);
+ ANativeWindow_unlockAndPost(window_);
+ }
+ }
+ }
+ av_packet_free(&pkt);
}
-
- if (audio_stream_) { AAudioStream_requestStop(audio_stream_); AAudioStream_close(audio_stream_); audio_stream_ = nullptr; }
av_frame_free(&frame);
- av_packet_free(&pkt);
- if (swr_ctx_) swr_free(&swr_ctx_);
- if (sws_ctx_) sws_freeContext(sws_ctx_);
- if (video_codec_ctx_) avcodec_free_context(&video_codec_ctx_);
- if (audio_codec_ctx_) avcodec_free_context(&audio_codec_ctx_);
- if (fmt_ctx_) avformat_close_input(&fmt_ctx_);
- LOGI("Player loop finished gracefully.");
}
std::string PlayerEngine::getSubtitleTracks() { return subtitle_tracks_info_; }
diff --git a/app/src/main/cpp/PlayerEngine.h b/app/src/main/cpp/PlayerEngine.h
index c386a922..8f389d87 100644
--- a/app/src/main/cpp/PlayerEngine.h
+++ b/app/src/main/cpp/PlayerEngine.h
@@ -1,10 +1,12 @@
#pragma once
+#include "PacketQueue.h"
#include
#include
#include
#include
#include
#include
+#include "sonic.h" // 💡 Sonic 헤더 추가
extern "C" {
#include
@@ -31,8 +33,31 @@ public:
std::string getSubtitleTracks();
void setSubtitleTrack(int streamIndex);
+ double getDuration() const;
+ void seekTo(double seconds);
private:
+ void readThreadLoop(); // 패킷을 읽어서 큐에 넣는 스레드 (생산자)
+ void videoThreadLoop(); // 비디오 큐에서 꺼내서 렌더링하는 스레드 (소비자)
+ void audioThreadLoop(); // 오디오 큐에서 꺼내서 출력하는 스레드 (소비자)
+
+ std::atomic<bool> videoCodecFlushReq_{false};
+ std::atomic<bool> audioCodecFlushReq_{false};
+
+ PacketQueue videoQueue_;
+ PacketQueue audioQueue_;
+
+ std::thread readThread_;
+ std::thread videoThread_;
+ std::thread audioThread_;
+
+ std::atomic<bool> abortRequest_{false};
+
+ // 💡 [A/V Sync의 핵심] 현재 오디오가 어디까지 재생되었는지 기록하는 마스터 시계
+ std::atomic<double> audioClock_{0.0};
+
+ std::atomic<bool> isAbsoluteSeek_{false}; // 절대 위치 이동 여부 플래그
+ std::atomic<double> seekTargetSec_{0.0};
void prepareInternal(); // 💡 백그라운드 준비 스레드
void renderLoop();
void sendSubtitleToKotlin(const char* text);
@@ -75,4 +100,7 @@ private:
jmethodID videoSizeMethodId_ = nullptr;
jmethodID preparedMethodId_ = nullptr; // 💡 JNI 콜백 ID 추가
jmethodID errorMethodId_ = nullptr; // 💡 JNI 에러 콜백 ID 추가
+
+ sonicStream sonic_stream_ = nullptr;
+
};
\ No newline at end of file
diff --git a/app/src/main/cpp/native_player.cpp b/app/src/main/cpp/native_player.cpp
index beb2e3c9..4a94eef7 100644
--- a/app/src/main/cpp/native_player.cpp
+++ b/app/src/main/cpp/native_player.cpp
@@ -9,6 +9,18 @@ T* toPlayerNative(jlong handle) { return reinterpret_cast<T*>(handle); }
extern "C" {
+JNIEXPORT jdouble JNICALL
+Java_bums_lunatic_launcher_player_NativePlayer_nativeGetDuration(JNIEnv *env, jobject thiz, jlong handle) {
+ PlayerEngine* engine = toPlayerNative(handle);
+ return engine ? engine->getDuration() : 0.0;
+}
+
+JNIEXPORT void JNICALL
+Java_bums_lunatic_launcher_player_NativePlayer_nativeSeekTo(JNIEnv *env, jobject thiz, jlong handle, jdouble seconds) {
+ PlayerEngine* engine = toPlayerNative(handle);
+ if (engine) engine->seekTo(seconds);
+}
+
JNIEXPORT jlong JNICALL
Java_bums_lunatic_launcher_player_NativePlayer_nativeInit(JNIEnv *env, jobject thiz) {
PlayerEngine* engine = new PlayerEngine(g_vm, thiz);
diff --git a/app/src/main/cpp/sonic.c b/app/src/main/cpp/sonic.c
new file mode 100644
index 00000000..880b1ec5
--- /dev/null
+++ b/app/src/main/cpp/sonic.c
@@ -0,0 +1,1246 @@
+/* Sonic library
+ Copyright 2010
+ Bill Cox
+ This file is part of the Sonic Library.
+
+ This file is licensed under the Apache 2.0 license.
+*/
+
+#include "sonic.h"
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/*
+ The following code was used to generate the following sinc lookup table.
+
+ #include <limits.h>
+ #include <math.h>
+ #include <stdio.h>
+
+ double findHannWeight(int N, double x) {
+ return 0.5*(1.0 - cos(2*M_PI*x/N));
+ }
+
+ double findSincCoefficient(int N, double x) {
+ double hannWindowWeight = findHannWeight(N, x);
+ double sincWeight;
+
+ x -= N/2.0;
+ if (x > 1e-9 || x < -1e-9) {
+ sincWeight = sin(M_PI*x)/(M_PI*x);
+ } else {
+ sincWeight = 1.0;
+ }
+ return hannWindowWeight*sincWeight;
+ }
+
+ int main() {
+ double x;
+ int i;
+ int N = 12;
+
+ for (i = 0, x = 0.0; x <= N; x += 0.02, i++) {
+ printf("%u %d\n", i, (int)(SHRT_MAX*findSincCoefficient(N, x)));
+ }
+ return 0;
+ }
+*/
+
+#define CLAMP(val, min, max) \
+ ((val) < (min) ? (min) : (val) > (max) ? (max) : (val))
+
+/* The number of points to use in the sinc FIR filter for resampling. */
+#define SINC_FILTER_POINTS \
+ 12 /* I am not able to hear improvement with higher N. */
+#define SINC_TABLE_SIZE 601
+
+/* Lookup table for windowed sinc function of SINC_FILTER_POINTS points. */
+static short sincTable[SINC_TABLE_SIZE] = {
+ 0, 0, 0, 0, 0, 0, 0, -1, -1, -2, -2,
+ -3, -4, -6, -7, -9, -10, -12, -14, -17, -19, -21,
+ -24, -26, -29, -32, -34, -37, -40, -42, -44, -47, -48,
+ -50, -51, -52, -53, -53, -53, -52, -50, -48, -46, -43,
+ -39, -34, -29, -22, -16, -8, 0, 9, 19, 29, 41,
+ 53, 65, 79, 92, 107, 121, 137, 152, 168, 184, 200,
+ 215, 231, 247, 262, 276, 291, 304, 317, 328, 339, 348,
+ 357, 363, 369, 372, 374, 375, 373, 369, 363, 355, 345,
+ 332, 318, 300, 281, 259, 234, 208, 178, 147, 113, 77,
+ 39, 0, -41, -85, -130, -177, -225, -274, -324, -375, -426,
+ -478, -530, -581, -632, -682, -731, -779, -825, -870, -912, -951,
+ -989, -1023, -1053, -1080, -1104, -1123, -1138, -1149, -1154, -1155, -1151,
+ -1141, -1125, -1105, -1078, -1046, -1007, -963, -913, -857, -796, -728,
+ -655, -576, -492, -403, -309, -210, -107, 0, 111, 225, 342,
+ 462, 584, 708, 833, 958, 1084, 1209, 1333, 1455, 1575, 1693,
+ 1807, 1916, 2022, 2122, 2216, 2304, 2384, 2457, 2522, 2579, 2625,
+ 2663, 2689, 2706, 2711, 2705, 2687, 2657, 2614, 2559, 2491, 2411,
+ 2317, 2211, 2092, 1960, 1815, 1658, 1489, 1308, 1115, 912, 698,
+ 474, 241, 0, -249, -506, -769, -1037, -1310, -1586, -1864, -2144,
+ -2424, -2703, -2980, -3254, -3523, -3787, -4043, -4291, -4529, -4757, -4972,
+ -5174, -5360, -5531, -5685, -5819, -5935, -6029, -6101, -6150, -6175, -6175,
+ -6149, -6096, -6015, -5905, -5767, -5599, -5401, -5172, -4912, -4621, -4298,
+ -3944, -3558, -3141, -2693, -2214, -1705, -1166, -597, 0, 625, 1277,
+ 1955, 2658, 3386, 4135, 4906, 5697, 6506, 7332, 8173, 9027, 9893,
+ 10769, 11654, 12544, 13439, 14335, 15232, 16128, 17019, 17904, 18782, 19649,
+ 20504, 21345, 22170, 22977, 23763, 24527, 25268, 25982, 26669, 27327, 27953,
+ 28547, 29107, 29632, 30119, 30569, 30979, 31349, 31678, 31964, 32208, 32408,
+ 32565, 32677, 32744, 32767, 32744, 32677, 32565, 32408, 32208, 31964, 31678,
+ 31349, 30979, 30569, 30119, 29632, 29107, 28547, 27953, 27327, 26669, 25982,
+ 25268, 24527, 23763, 22977, 22170, 21345, 20504, 19649, 18782, 17904, 17019,
+ 16128, 15232, 14335, 13439, 12544, 11654, 10769, 9893, 9027, 8173, 7332,
+ 6506, 5697, 4906, 4135, 3386, 2658, 1955, 1277, 625, 0, -597,
+ -1166, -1705, -2214, -2693, -3141, -3558, -3944, -4298, -4621, -4912, -5172,
+ -5401, -5599, -5767, -5905, -6015, -6096, -6149, -6175, -6175, -6150, -6101,
+ -6029, -5935, -5819, -5685, -5531, -5360, -5174, -4972, -4757, -4529, -4291,
+ -4043, -3787, -3523, -3254, -2980, -2703, -2424, -2144, -1864, -1586, -1310,
+ -1037, -769, -506, -249, 0, 241, 474, 698, 912, 1115, 1308,
+ 1489, 1658, 1815, 1960, 2092, 2211, 2317, 2411, 2491, 2559, 2614,
+ 2657, 2687, 2705, 2711, 2706, 2689, 2663, 2625, 2579, 2522, 2457,
+ 2384, 2304, 2216, 2122, 2022, 1916, 1807, 1693, 1575, 1455, 1333,
+ 1209, 1084, 958, 833, 708, 584, 462, 342, 225, 111, 0,
+ -107, -210, -309, -403, -492, -576, -655, -728, -796, -857, -913,
+ -963, -1007, -1046, -1078, -1105, -1125, -1141, -1151, -1155, -1154, -1149,
+ -1138, -1123, -1104, -1080, -1053, -1023, -989, -951, -912, -870, -825,
+ -779, -731, -682, -632, -581, -530, -478, -426, -375, -324, -274,
+ -225, -177, -130, -85, -41, 0, 39, 77, 113, 147, 178,
+ 208, 234, 259, 281, 300, 318, 332, 345, 355, 363, 369,
+ 373, 375, 374, 372, 369, 363, 357, 348, 339, 328, 317,
+ 304, 291, 276, 262, 247, 231, 215, 200, 184, 168, 152,
+ 137, 121, 107, 92, 79, 65, 53, 41, 29, 19, 9,
+ 0, -8, -16, -22, -29, -34, -39, -43, -46, -48, -50,
+ -52, -53, -53, -53, -52, -51, -50, -48, -47, -44, -42,
+ -40, -37, -34, -32, -29, -26, -24, -21, -19, -17, -14,
+ -12, -10, -9, -7, -6, -4, -3, -2, -2, -1, -1,
+ 0, 0, 0, 0, 0, 0, 0};
+
+/* These functions allocate out of a static array rather than calling
+ calloc/realloc/free if the NO_MALLOC flag is defined. Otherwise, call
+ calloc/realloc/free as usual. This is useful for running on small
+ microcontrollers. */
+#ifndef SONIC_NO_MALLOC
+
+/* Just call calloc. */
+static void* sonicCalloc(int num, int size) { return calloc(num, size); }
+
+/* Just call realloc */
+static void* sonicRealloc(void* p, int oldNum, int newNum, int size) {
+ return realloc(p, newNum * size);
+}
+
+/* Just call free. */
+static void sonicFree(void* p) { free(p); }
+
+#else
+
+#ifndef SONIC_MAX_MEMORY
+/* Large enough for speedup/slowdown at 8KHz, 16-bit mono samples/second. */
+#define SONIC_MAX_MEMORY (16 * 1024)
+#endif
+
+/* This static buffer is used to hold data allocated for the sonicStream struct
+ and its buffers. There should never be more than one sonicStream in use at a
+ time when using SONIC_NO_MALLOC mode. Calls to realloc move the data to the
+ end of memoryBuffer. Calls to free reset the memory buffer to empty. */
+static void*
+ memoryBufferAligned[(SONIC_MAX_MEMORY + sizeof(void*) - 1) / sizeof(void*)];
+static unsigned char* memoryBuffer = (unsigned char*)memoryBufferAligned;
+static int memoryBufferPos = 0;
+
+/* Allocate elements from a static memory buffer. */
+static void* sonicCalloc(int num, int size) {
+ int len = num * size;
+
+ if (memoryBufferPos + len > SONIC_MAX_MEMORY) {
+ return 0;
+ }
+ unsigned char* p = memoryBuffer + memoryBufferPos;
+ memoryBufferPos += len;
+ memset(p, 0, len);
+ return p;
+}
+
+/* Preferably, SONIC_MAX_MEMORY has been set large enough that this is never
+ * called. */
+static void* sonicRealloc(void* p, int oldNum, int newNum, int size) {
+ if (newNum <= oldNum) {
+ return p;
+ }
+ void* newBuffer = sonicCalloc(newNum, size);
+ if (newBuffer == NULL) {
+ return NULL;
+ }
+ memcpy(newBuffer, p, oldNum * size);
+ return newBuffer;
+}
+
+/* Reset memoryBufferPos to 0. We asssume all data is freed at the same time.
+ */
+static void sonicFree(void* p) { memoryBufferPos = 0; }
+
+#endif
+
/* Internal state of one sonic stream.  All sample buffers hold interleaved
   16-bit samples: numChannels values per frame. */
struct sonicStreamStruct {
#ifdef SONIC_SPECTROGRAM
  sonicSpectrogram spectrogram; /* Optional on-the-fly spectrogram. */
#endif /* SONIC_SPECTROGRAM */
  short* inputBuffer;      /* Unprocessed input samples. */
  short* outputBuffer;     /* Processed samples ready to be read out. */
  short* pitchBuffer;      /* Staging buffer used when changing the rate. */
  short* downSampleBuffer; /* Mono scratch buffer for the fast pitch search. */
  void* userData;          /* Opaque pointer owned by the caller. */
  float speed;             /* Current speed/volume/pitch/rate settings. */
  float volume;
  float pitch;
  float rate;
  /* The point of the following 3 new variables is to gracefully handle rapidly
     changing input speed.

     samplePeriod is just 1.0/sampleRate.  It is used in accumulating
     inputPlayTime, which is how long we expect the total time should be to
     play the current input samples in the input buffer.  timeError keeps
     track of the error in play time created when playing < 2.0X speed, where
     we either insert or delete a whole pitch period.  This can cause the
     output generated from the input to be off in play time by up to a pitch
     period.  timeError replaces PICOLA's concept of the number of samples to
     play unmodified after a pitch period insertion or deletion.  If speeding
     up, and the error is >= 0.0, then remove a pitch period, and play samples
     unmodified until timeError is >= 0 again.  If slowing down, and the error
     is <= 0.0, then add a pitch period, and play samples unmodified until
     timeError is <= 0 again. */
  float samplePeriod; /* How long each output sample takes to play. */
  /* How long we expect the entire input buffer to take to play. */
  float inputPlayTime;
  /* The difference in when the latest output sample was played vs when we
   * wanted. */
  float timeError;
  int oldRatePosition; /* Resampler phase counters (see adjustRate). */
  int newRatePosition;
  int quality;      /* 0 = fast pitch search (default); non-zero = thorough. */
  int numChannels;
  int inputBufferSize;  /* Capacities, in frames, of the buffers above. */
  int pitchBufferSize;
  int outputBufferSize;
  int numInputSamples;  /* Frames currently stored in each buffer. */
  int numOutputSamples;
  int numPitchSamples;
  int minPeriod;   /* Pitch-period search range, in samples. */
  int maxPeriod;
  int maxRequired; /* 2 * maxPeriod: input needed before processing a chunk. */
  int remainingInputToCopy;
  int sampleRate;
  int prevPeriod;  /* Pitch estimate from the previous analysis window... */
  int prevMinDiff; /* ...and its normalized AMDF value (see prevPeriodBetter). */
};
+
+/* Attach user data to the stream. */
+void sonicSetUserData(sonicStream stream, void* userData) {
+ stream->userData = userData;
+}
+
+/* Retrieve user data attached to the stream. */
+void* sonicGetUserData(sonicStream stream) { return stream->userData; }
+
+#ifdef SONIC_SPECTROGRAM
+
/* Compute a spectrogram on the fly. */
void sonicComputeSpectrogram(sonicStream stream) {
  stream->spectrogram = sonicCreateSpectrogram(stream->sampleRate);
  /* Setting a non-1.0 speed routes processing through changeSpeed, which is
     where pitch periods get added to the spectrogram.
     NOTE(review): a previously attached spectrogram is overwritten, not
     destroyed -- presumably this is called at most once per stream; confirm
     with callers. */
  sonicSetSpeed(stream, 2.0);
}
+
+/* Get the spectrogram. */
+sonicSpectrogram sonicGetSpectrogram(sonicStream stream) {
+ return stream->spectrogram;
+}
+
+#endif
+
/* Multiply each sample by the volume factor, saturating at the symmetric
   16-bit limits (+/-32767) used throughout the library. */
static void scaleSamples(short* samples, int numSamples, float volume) {
  /* Fixed point: 24-bit integer part, 8-bit fractional part. */
  int scale = volume * 256.0f;
  int i;

  for (i = 0; i < numSamples; i++) {
    int scaled = (samples[i] * scale) >> 8;

    if (scaled > 32767) {
      scaled = 32767;
    } else if (scaled < -32767) {
      scaled = -32767;
    }
    samples[i] = scaled;
  }
}
+
+/* Get the speed of the stream. */
+float sonicGetSpeed(sonicStream stream) { return stream->speed; }
+
+/* Set the speed of the stream. */
+void sonicSetSpeed(sonicStream stream, float speed) {
+ stream->speed = CLAMP(speed, SONIC_MIN_SPEED, SONIC_MAX_SPEED);
+}
+
+/* Get the pitch of the stream. */
+float sonicGetPitch(sonicStream stream) { return stream->pitch; }
+
+/* Set the pitch of the stream. */
+void sonicSetPitch(sonicStream stream, float pitch) {
+ stream->pitch = CLAMP(pitch, SONIC_MIN_PITCH_SETTING, SONIC_MAX_PITCH_SETTING);
+}
+
+/* Get the rate of the stream. */
+float sonicGetRate(sonicStream stream) { return stream->rate; }
+
+/* Set the playback rate of the stream. This scales pitch and speed at the same
+ time. */
+void sonicSetRate(sonicStream stream, float rate) {
+ stream->rate = CLAMP(rate, SONIC_MIN_RATE, SONIC_MAX_RATE);
+
+ stream->oldRatePosition = 0;
+ stream->newRatePosition = 0;
+}
+
+/* DEPRECATED. Get the vocal chord pitch setting. */
+int sonicGetChordPitch(sonicStream stream) { return 0; }
+
+/* DEPRECATED. Set the vocal chord mode for pitch computation. Default is off.
+ */
+void sonicSetChordPitch(sonicStream stream, int useChordPitch) {}
+
+/* Get the quality setting. */
+int sonicGetQuality(sonicStream stream) { return stream->quality; }
+
+/* Set the "quality". Default 0 is virtually as good as 1, but very much
+ faster. */
+void sonicSetQuality(sonicStream stream, int quality) {
+ stream->quality = quality != 0? 1 : 0;
+}
+
+/* Get the scaling factor of the stream. */
+float sonicGetVolume(sonicStream stream) { return stream->volume; }
+
+/* Set the scaling factor of the stream. */
+void sonicSetVolume(sonicStream stream, float volume) {
+ stream->volume = CLAMP(volume, SONIC_MIN_VOLUME, SONIC_MAX_VOLUME);
+}
+
+/* Free stream buffers. */
+static void freeStreamBuffers(sonicStream stream) {
+ if (stream->inputBuffer != NULL) {
+ sonicFree(stream->inputBuffer);
+ }
+ if (stream->outputBuffer != NULL) {
+ sonicFree(stream->outputBuffer);
+ }
+ if (stream->pitchBuffer != NULL) {
+ sonicFree(stream->pitchBuffer);
+ }
+ if (stream->downSampleBuffer != NULL) {
+ sonicFree(stream->downSampleBuffer);
+ }
+}
+
/* Destroy the sonic stream: release the sample buffers, the optional
   spectrogram, and finally the stream object itself. */
void sonicDestroyStream(sonicStream stream) {
#ifdef SONIC_SPECTROGRAM
  if (stream->spectrogram != NULL) {
    sonicDestroySpectrogram(stream->spectrogram);
  }
#endif /* SONIC_SPECTROGRAM */
  freeStreamBuffers(stream);
  sonicFree(stream);
}
+
+/* Compute the number of samples to skip to down-sample the input. */
+static int computeSkip(sonicStream stream, int sampleRate) {
+ int skip = 1;
+ if (sampleRate > SONIC_AMDF_FREQ && stream->quality == 0) {
+ skip = sampleRate / SONIC_AMDF_FREQ;
+ }
+ return skip;
+}
+
+/* Allocate stream buffers. */
+static int allocateStreamBuffers(sonicStream stream, int sampleRate,
+ int numChannels) {
+ int minPeriod = sampleRate / SONIC_MAX_PITCH;
+ int maxPeriod = sampleRate / SONIC_MIN_PITCH;
+ int maxRequired = 2 * maxPeriod;
+
+ /* Allocate 25% more than needed so we hopefully won't grow. */
+ stream->inputBufferSize = maxRequired + (maxRequired >> 2);
+
+ stream->inputBuffer =
+ (short*)sonicCalloc(stream->inputBufferSize, sizeof(short) * numChannels);
+ if (stream->inputBuffer == NULL) {
+ sonicDestroyStream(stream);
+ return 0;
+ }
+ /* Allocate 25% more than needed so we hopefully won't grow. */
+ stream->outputBufferSize = maxRequired + (maxRequired >> 2);
+ stream->outputBuffer = (short*)sonicCalloc(stream->outputBufferSize,
+ sizeof(short) * numChannels);
+ if (stream->outputBuffer == NULL) {
+ sonicDestroyStream(stream);
+ return 0;
+ }
+ /* Allocate 25% more than needed so we hopefully won't grow. */
+ stream->pitchBufferSize = maxRequired + (maxRequired >> 2);
+ stream->pitchBuffer =
+ (short*)sonicCalloc(stream->pitchBufferSize, sizeof(short) * numChannels);
+ if (stream->pitchBuffer == NULL) {
+ sonicDestroyStream(stream);
+ return 0;
+ }
+ int downSampleBufferSize = maxRequired;
+ stream->downSampleBuffer =
+ (short*)sonicCalloc(downSampleBufferSize, sizeof(short));
+ if (stream->downSampleBuffer == NULL) {
+ sonicDestroyStream(stream);
+ return 0;
+ }
+ stream->sampleRate = sampleRate;
+ stream->samplePeriod = 1.0 / sampleRate;
+ stream->numChannels = numChannels;
+ stream->oldRatePosition = 0;
+ stream->newRatePosition = 0;
+ stream->minPeriod = minPeriod;
+ stream->maxPeriod = maxPeriod;
+ stream->maxRequired = maxRequired;
+ stream->prevPeriod = 0;
+ return 1;
+}
+
+/* Create a sonic stream. Return NULL only if we are out of memory and cannot
+ allocate the stream. */
+sonicStream sonicCreateStream(int sampleRate, int numChannels) {
+ sonicStream stream =
+ (sonicStream)sonicCalloc(1, sizeof(struct sonicStreamStruct));
+
+ sampleRate = CLAMP(sampleRate, SONIC_MIN_SAMPLE_RATE, SONIC_MAX_SAMPLE_RATE);
+ numChannels = CLAMP(numChannels, SONIC_MIN_CHANNELS, SONIC_MAX_CHANNELS);
+ if (stream == NULL) {
+ return NULL;
+ }
+ if (!allocateStreamBuffers(stream, sampleRate, numChannels)) {
+ return NULL;
+ }
+ stream->speed = 1.0f;
+ stream->pitch = 1.0f;
+ stream->volume = 1.0f;
+ stream->rate = 1.0f;
+ stream->oldRatePosition = 0;
+ stream->newRatePosition = 0;
+ stream->quality = 0;
+ return stream;
+}
+
+/* Get the sample rate of the stream. */
+int sonicGetSampleRate(sonicStream stream) { return stream->sampleRate; }
+
/* Set the sample rate of the stream.  This will cause samples buffered in
   the stream to be lost.
   NOTE(review): the return value of allocateStreamBuffers is ignored; on an
   out-of-memory failure it destroys the stream, leaving the caller with a
   dangling pointer.  Confirm callers tolerate this, or report the failure. */
void sonicSetSampleRate(sonicStream stream, int sampleRate) {
  sampleRate = CLAMP(sampleRate, SONIC_MIN_SAMPLE_RATE, SONIC_MAX_SAMPLE_RATE);
  freeStreamBuffers(stream);
  allocateStreamBuffers(stream, sampleRate, stream->numChannels);
}
+
+/* Get the number of channels. */
+int sonicGetNumChannels(sonicStream stream) { return stream->numChannels; }
+
/* Set the num channels of the stream.  This will cause samples buffered in
   the stream to be lost.
   NOTE(review): the return value of allocateStreamBuffers is ignored; on an
   out-of-memory failure it destroys the stream, leaving the caller with a
   dangling pointer.  Confirm callers tolerate this, or report the failure. */
void sonicSetNumChannels(sonicStream stream, int numChannels) {
  numChannels = CLAMP(numChannels, SONIC_MIN_CHANNELS, SONIC_MAX_CHANNELS);
  freeStreamBuffers(stream);
  allocateStreamBuffers(stream, stream->sampleRate, numChannels);
}
+
+/* Enlarge the output buffer if needed. */
+static int enlargeOutputBufferIfNeeded(sonicStream stream, int numSamples) {
+ int outputBufferSize = stream->outputBufferSize;
+
+ if (stream->numOutputSamples + numSamples > outputBufferSize) {
+ stream->outputBufferSize += (outputBufferSize >> 1) + numSamples;
+ stream->outputBuffer = (short*)sonicRealloc(
+ stream->outputBuffer, outputBufferSize, stream->outputBufferSize,
+ sizeof(short) * stream->numChannels);
+ if (stream->outputBuffer == NULL) {
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/* Enlarge the input buffer if needed. */
+static int enlargeInputBufferIfNeeded(sonicStream stream, int numSamples) {
+ int inputBufferSize = stream->inputBufferSize;
+
+ if (stream->numInputSamples + numSamples > inputBufferSize) {
+ stream->inputBufferSize += (inputBufferSize >> 1) + numSamples;
+ stream->inputBuffer = (short*)sonicRealloc(
+ stream->inputBuffer, inputBufferSize, stream->inputBufferSize,
+ sizeof(short) * stream->numChannels);
+ if (stream->inputBuffer == NULL) {
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/* Update stream->numInputSamples, and update stream->inputPlayTime. Call this
+ whenever adding samples to the input buffer, to keep track of total expected
+ input play time accounting. */
+static void updateNumInputSamples(sonicStream stream, int numSamples) {
+ float speed = stream->speed / stream->pitch;
+
+ stream->numInputSamples += numSamples;
+ stream->inputPlayTime += numSamples * stream->samplePeriod / speed;
+}
+
+/* Add the input samples to the input buffer. */
+static int addFloatSamplesToInputBuffer(sonicStream stream,
+ const float* samples, int numSamples) {
+ short* buffer;
+ int count = numSamples * stream->numChannels;
+
+ if (numSamples == 0) {
+ return 1;
+ }
+ if (!enlargeInputBufferIfNeeded(stream, numSamples)) {
+ return 0;
+ }
+ buffer = stream->inputBuffer + stream->numInputSamples * stream->numChannels;
+ while (count--) {
+ *buffer++ = (*samples++) * 32767.0f;
+ }
+ updateNumInputSamples(stream, numSamples);
+ return 1;
+}
+
+/* Add the input samples to the input buffer. */
+static int addShortSamplesToInputBuffer(sonicStream stream,
+ const short* samples, int numSamples) {
+ if (numSamples == 0) {
+ return 1;
+ }
+ if (!enlargeInputBufferIfNeeded(stream, numSamples)) {
+ return 0;
+ }
+ memcpy(stream->inputBuffer + stream->numInputSamples * stream->numChannels,
+ samples, numSamples * sizeof(short) * stream->numChannels);
+ updateNumInputSamples(stream, numSamples);
+ return 1;
+}
+
+/* Add the input samples to the input buffer. */
+static int addUnsignedCharSamplesToInputBuffer(sonicStream stream,
+ const unsigned char* samples,
+ int numSamples) {
+ short* buffer;
+ int count = numSamples * stream->numChannels;
+
+ if (numSamples == 0) {
+ return 1;
+ }
+ if (!enlargeInputBufferIfNeeded(stream, numSamples)) {
+ return 0;
+ }
+ buffer = stream->inputBuffer + stream->numInputSamples * stream->numChannels;
+ while (count--) {
+ *buffer++ = (*samples++ - 128) << 8;
+ }
+ updateNumInputSamples(stream, numSamples);
+ return 1;
+}
+
+/* Remove input samples that we have already processed. */
+static void removeInputSamples(sonicStream stream, int position) {
+ int remainingSamples = stream->numInputSamples - position;
+
+ if (remainingSamples > 0) {
+ memmove(stream->inputBuffer,
+ stream->inputBuffer + position * stream->numChannels,
+ remainingSamples * sizeof(short) * stream->numChannels);
+ }
+ /* If we play 3/4ths of the samples, then the expected play time of the
+ remaining samples is 1/4th of the original expected play time. */
+ stream->inputPlayTime =
+ (stream->inputPlayTime * remainingSamples) / stream->numInputSamples;
+ stream->numInputSamples = remainingSamples;
+}
+
/* Copy the first numSamples frames from the input buffer to the output
   buffer, and remove them from the input buffer.  Returns 0 only if the
   output buffer cannot be grown. */
static int copyInputToOutput(sonicStream stream, int numSamples) {
  if (!enlargeOutputBufferIfNeeded(stream, numSamples)) {
    return 0;
  }
  memcpy(stream->outputBuffer + stream->numOutputSamples * stream->numChannels,
         stream->inputBuffer, numSamples * sizeof(short) * stream->numChannels);
  stream->numOutputSamples += numSamples;
  removeInputSamples(stream, numSamples);
  return 1;
}
+
+/* Copy from samples to the output buffer */
+static int copyToOutput(sonicStream stream, short* samples, int numSamples) {
+ if (!enlargeOutputBufferIfNeeded(stream, numSamples)) {
+ return 0;
+ }
+ memcpy(stream->outputBuffer + stream->numOutputSamples * stream->numChannels,
+ samples, numSamples * sizeof(short) * stream->numChannels);
+ stream->numOutputSamples += numSamples;
+ return 1;
+}
+
+/* Read data out of the stream. Sometimes no data will be available, and zero
+ is returned, which is not an error condition. */
+int sonicReadFloatFromStream(sonicStream stream, float* samples,
+ int maxSamples) {
+ int numSamples = stream->numOutputSamples;
+ int remainingSamples = 0;
+ short* buffer;
+ int count;
+
+ if (numSamples == 0) {
+ return 0;
+ }
+ if (numSamples > maxSamples) {
+ remainingSamples = numSamples - maxSamples;
+ numSamples = maxSamples;
+ }
+ buffer = stream->outputBuffer;
+ count = numSamples * stream->numChannels;
+ while (count--) {
+ *samples++ = (*buffer++) / 32767.0f;
+ }
+ if (remainingSamples > 0) {
+ memmove(stream->outputBuffer,
+ stream->outputBuffer + numSamples * stream->numChannels,
+ remainingSamples * sizeof(short) * stream->numChannels);
+ }
+ stream->numOutputSamples = remainingSamples;
+ return numSamples;
+}
+
+/* Read short data out of the stream. Sometimes no data will be available, and
+ zero is returned, which is not an error condition. */
+int sonicReadShortFromStream(sonicStream stream, short* samples,
+ int maxSamples) {
+ int numSamples = stream->numOutputSamples;
+ int remainingSamples = 0;
+
+ if (numSamples == 0) {
+ return 0;
+ }
+ if (numSamples > maxSamples) {
+ remainingSamples = numSamples - maxSamples;
+ numSamples = maxSamples;
+ }
+ memcpy(samples, stream->outputBuffer,
+ numSamples * sizeof(short) * stream->numChannels);
+ if (remainingSamples > 0) {
+ memmove(stream->outputBuffer,
+ stream->outputBuffer + numSamples * stream->numChannels,
+ remainingSamples * sizeof(short) * stream->numChannels);
+ }
+ stream->numOutputSamples = remainingSamples;
+ return numSamples;
+}
+
/* Read unsigned char data out of the stream.  Sometimes no data will be
   available, and zero is returned, which is not an error condition. */
int sonicReadUnsignedCharFromStream(sonicStream stream, unsigned char* samples,
                                    int maxSamples) {
  int numSamples = stream->numOutputSamples;
  int remainingSamples = 0; /* Frames kept back for the next read. */
  short* buffer;
  int count;

  if (numSamples == 0) {
    return 0;
  }
  if (numSamples > maxSamples) {
    remainingSamples = numSamples - maxSamples;
    numSamples = maxSamples;
  }
  buffer = stream->outputBuffer;
  count = numSamples * stream->numChannels;
  while (count--) {
    /* Keep the high byte of the 16-bit sample and re-bias it to unsigned
       8-bit.  NOTE(review): the (char) cast of a negative value is
       implementation-defined in C; in practice this relies on two's
       complement truncation. */
    *samples++ = (char)((*buffer++) >> 8) + 128;
  }
  if (remainingSamples > 0) {
    /* Slide the unread frames to the front of the output buffer. */
    memmove(stream->outputBuffer,
            stream->outputBuffer + numSamples * stream->numChannels,
            remainingSamples * sizeof(short) * stream->numChannels);
  }
  stream->numOutputSamples = remainingSamples;
  return numSamples;
}
+
/* Force the sonic stream to generate output using whatever data it currently
   has.  No extra delay will be added to the output, but flushing in the
   middle of words could introduce distortion. */
int sonicFlushStream(sonicStream stream) {
  int maxRequired = stream->maxRequired;
  int remainingSamples = stream->numInputSamples;
  float speed = stream->speed / stream->pitch;
  float rate = stream->rate * stream->pitch;
  /* Upper bound on the genuine output the buffered input and pitch samples
     should produce; used below to trim off padding-induced output. */
  int expectedOutputSamples =
      stream->numOutputSamples +
      (int)((remainingSamples / speed + stream->numPitchSamples) / rate + 0.5f);

  /* Add enough silence to flush both input and pitch buffers. */
  if (!enlargeInputBufferIfNeeded(stream, remainingSamples + 2 * maxRequired)) {
    return 0;
  }
  memset(stream->inputBuffer + remainingSamples * stream->numChannels, 0,
         2 * maxRequired * sizeof(short) * stream->numChannels);
  /* The count is bumped directly (not via updateNumInputSamples) so the
     padding does not inflate the expected play time. */
  stream->numInputSamples += 2 * maxRequired;
  /* A zero-length write simply runs processStreamInput on the padded data. */
  if (!sonicWriteShortToStream(stream, NULL, 0)) {
    return 0;
  }
  /* Throw away any extra samples we generated due to the silence we added */
  if (stream->numOutputSamples > expectedOutputSamples) {
    stream->numOutputSamples = expectedOutputSamples;
  }
  /* Empty input and pitch buffers and reset the time accounting. */
  stream->numInputSamples = 0;
  stream->inputPlayTime = 0.0f;
  stream->timeError = 0.0f;
  stream->numPitchSamples = 0;
  return 1;
}
+
+/* Return the number of samples in the output buffer */
+int sonicSamplesAvailable(sonicStream stream) {
+ return stream->numOutputSamples;
+}
+
+/* If skip is greater than one, average skip samples together and write them to
+ the down-sample buffer. If numChannels is greater than one, mix the channels
+ together as we down sample. */
+static void downSampleInput(sonicStream stream, short* samples, int skip) {
+ int numSamples = stream->maxRequired / skip;
+ int samplesPerValue = stream->numChannels * skip;
+ int i, j;
+ int value;
+ short* downSamples = stream->downSampleBuffer;
+
+ for (i = 0; i < numSamples; i++) {
+ value = 0;
+ for (j = 0; j < samplesPerValue; j++) {
+ value += *samples++;
+ }
+ value /= samplesPerValue;
+ *downSamples++ = value;
+ }
+}
+
/* Search [minPeriod, maxPeriod] for the period whose average magnitude
   difference (the signal compared against itself shifted by the period) is
   smallest relative to the period length.  Only the first channel is
   considered.  Returns the best period; the normalized best and worst
   difference values are reported through retMinDiff/retMaxDiff. */
static int findPitchPeriodInRange(short* samples, int minPeriod, int maxPeriod,
                                  int* retMinDiff, int* retMaxDiff) {
  int bestPeriod = 0;
  int worstPeriod = 255;
  unsigned long minDiff = 1;
  unsigned long maxDiff = 0;
  int period;

  for (period = minPeriod; period <= maxPeriod; period++) {
    unsigned long diff = 0;
    int i;

    for (i = 0; i < period; i++) {
      short sVal = samples[i];
      short pVal = samples[i + period];

      diff += sVal >= pVal ? (unsigned short)(sVal - pVal)
                           : (unsigned short)(pVal - sVal);
    }
    /* Compare normalized differences (diff/period) using cross products to
       avoid division.  The number of samples summed into diff is < 256
       (samples are skipped), so diff is a 24-bit number and the products
       cannot overflow. */
    if (bestPeriod == 0 || diff * bestPeriod < minDiff * period) {
      minDiff = diff;
      bestPeriod = period;
    }
    if (diff * worstPeriod > maxDiff * period) {
      maxDiff = diff;
      worstPeriod = period;
    }
  }
  *retMinDiff = minDiff / bestPeriod;
  *retMaxDiff = maxDiff / worstPeriod;
  return bestPeriod;
}
+
+/* At abrupt ends of voiced words, we can have pitch periods that are better
+ approximated by the previous pitch period estimate. Try to detect this case.
+ */
+static int prevPeriodBetter(sonicStream stream, int minDiff, int maxDiff,
+ int preferNewPeriod) {
+ if (minDiff == 0 || stream->prevPeriod == 0) {
+ return 0;
+ }
+ if (preferNewPeriod) {
+ if (maxDiff > minDiff * 3) {
+ /* Got a reasonable match this period */
+ return 0;
+ }
+ if (minDiff * 2 <= stream->prevMinDiff * 3) {
+ /* Mismatch is not that much greater this period */
+ return 0;
+ }
+ } else {
+ if (minDiff <= stream->prevMinDiff) {
+ return 0;
+ }
+ }
+ return 1;
+}
+
/* Find the pitch period.  This is a critical step, and we may have to try
   multiple ways to get a good answer.  This version uses Average Magnitude
   Difference Function (AMDF).  To improve speed, we down sample by an
   integer factor to get into the 11KHz range, and then do it again with a
   narrower frequency range without down sampling. */
static int findPitchPeriod(sonicStream stream, short* samples,
                           int preferNewPeriod) {
  int minPeriod = stream->minPeriod;
  int maxPeriod = stream->maxPeriod;
  int minDiff, maxDiff, retPeriod;
  int skip = computeSkip(stream, stream->sampleRate);
  int period;

  if (stream->numChannels == 1 && skip == 1) {
    /* Fast path: mono input at a low enough rate to search directly. */
    period = findPitchPeriodInRange(samples, minPeriod, maxPeriod, &minDiff,
                                    &maxDiff);
  } else {
    /* Coarse pass over the down-sampled (and channel-mixed) signal. */
    downSampleInput(stream, samples, skip);
    period = findPitchPeriodInRange(stream->downSampleBuffer, minPeriod / skip,
                                    maxPeriod / skip, &minDiff, &maxDiff);
    if (skip != 1) {
      /* Refine: re-search a narrow window around the coarse estimate at the
         full sample rate. */
      period *= skip;
      minPeriod = period - (skip << 2);
      maxPeriod = period + (skip << 2);
      if (minPeriod < stream->minPeriod) {
        minPeriod = stream->minPeriod;
      }
      if (maxPeriod > stream->maxPeriod) {
        maxPeriod = stream->maxPeriod;
      }
      if (stream->numChannels == 1) {
        period = findPitchPeriodInRange(samples, minPeriod, maxPeriod, &minDiff,
                                        &maxDiff);
      } else {
        /* Multi-channel: mix channels (skip == 1) before the refined pass. */
        downSampleInput(stream, samples, 1);
        period = findPitchPeriodInRange(stream->downSampleBuffer, minPeriod,
                                        maxPeriod, &minDiff, &maxDiff);
      }
    }
  }
  /* Possibly fall back on the previous window's estimate at abrupt word
     endings. */
  if (prevPeriodBetter(stream, minDiff, maxDiff, preferNewPeriod)) {
    retPeriod = stream->prevPeriod;
  } else {
    retPeriod = period;
  }
  stream->prevMinDiff = minDiff;
  stream->prevPeriod = period;
  return retPeriod;
}
+
/* Cross-fade two segments of numSamples frames: rampDown fades linearly from
   full scale to zero while rampUp fades from zero to full scale, and their
   sum is written to out.  Channels are interleaved and faded independently. */
static void overlapAdd(int numSamples, int numChannels, short* out,
                       short* rampDown, short* rampUp) {
  int channel, t;

  for (channel = 0; channel < numChannels; channel++) {
    for (t = 0; t < numSamples; t++) {
      int idx = t * numChannels + channel;

#ifdef SONIC_USE_SIN
      float ratio = sin(t * M_PI / (2 * numSamples));
      out[idx] = rampDown[idx] * (1.0f - ratio) + rampUp[idx] * ratio;
#else
      out[idx] = (rampDown[idx] * (numSamples - t) + rampUp[idx] * t) /
                 numSamples;
#endif
    }
  }
}
+
+/* Just move the new samples in the output buffer to the pitch buffer */
+static int moveNewSamplesToPitchBuffer(sonicStream stream,
+ int originalNumOutputSamples) {
+ int numSamples = stream->numOutputSamples - originalNumOutputSamples;
+ int numChannels = stream->numChannels;
+ int pitchBufferSize = stream->pitchBufferSize;
+
+ if (stream->numPitchSamples + numSamples > pitchBufferSize) {
+ stream->pitchBufferSize += (pitchBufferSize >> 1) + numSamples;
+ stream->pitchBuffer = (short*)sonicRealloc(
+ stream->pitchBuffer, pitchBufferSize, stream->pitchBufferSize,
+ sizeof(short) * numChannels);
+ }
+ memcpy(stream->pitchBuffer + stream->numPitchSamples * numChannels,
+ stream->outputBuffer + originalNumOutputSamples * numChannels,
+ numSamples * sizeof(short) * numChannels);
+ stream->numOutputSamples = originalNumOutputSamples;
+ stream->numPitchSamples += numSamples;
+ return 1;
+}
+
+/* Remove processed samples from the pitch buffer. */
+static void removePitchSamples(sonicStream stream, int numSamples) {
+ int numChannels = stream->numChannels;
+ short* source = stream->pitchBuffer + numSamples * numChannels;
+
+ if (numSamples == 0) {
+ return;
+ }
+ if (numSamples != stream->numPitchSamples) {
+ memmove(
+ stream->pitchBuffer, source,
+ (stream->numPitchSamples - numSamples) * sizeof(short) * numChannels);
+ }
+ stream->numPitchSamples -= numSamples;
+}
+
/* Approximate the sinc function times a Hann window from the sinc table.
   `i` selects the filter tap (one of SINC_FILTER_POINTS lobes) and
   ratio/width give the fractional position within the sample step; the
   result is linearly interpolated between the two nearest table entries.
   NOTE(review): assumes 0 <= ratio < width so `right` stays inside the
   table -- confirm against the caller (interpolate). */
static int findSincCoefficient(int i, int ratio, int width) {
  int lobePoints = (SINC_TABLE_SIZE - 1) / SINC_FILTER_POINTS;
  int left = i * lobePoints + (ratio * lobePoints) / width;
  int right = left + 1;
  /* Exact (width-scaled) position between the two table entries. */
  int position = i * lobePoints * width + ratio * lobePoints - left * width;
  int leftVal = sincTable[left];
  int rightVal = sincTable[right];

  /* Linear interpolation; the << 1 doubles the result, presumably to match
     the table's fixed-point scale -- confirm against sincTable generation. */
  return ((leftVal * (width - position) + rightVal * position) << 1) / width;
}
+
/* Sign of value: +1 when value >= 0, otherwise -1. */
static int getSign(int value) {
  if (value < 0) {
    return -1;
  }
  return 1;
}
+
/* Interpolate the next output sample from the pitch buffer with an N-point
   windowed-sinc FIR filter, clipping (not wrapping) on overflow. */
static short interpolate(sonicStream stream, short* in, int oldSampleRate,
                         int newSampleRate) {
  /* Compute N-point sinc FIR-filter here. Clip rather than overflow. */
  int i;
  int total = 0;
  /* Positions are compared on a common integer clock: one output step
     advances by oldSampleRate units, one input step by newSampleRate. */
  int position = stream->newRatePosition * oldSampleRate;
  int leftPosition = stream->oldRatePosition * newSampleRate;
  int rightPosition = (stream->oldRatePosition + 1) * newSampleRate;
  int ratio = rightPosition - position - 1; /* Fractional offset in the step. */
  int width = rightPosition - leftPosition;
  int weight, value;
  int oldSign;
  int overflowCount = 0; /* Net number of signed wrap-arounds observed. */

  for (i = 0; i < SINC_FILTER_POINTS; i++) {
    weight = findSincCoefficient(i, ratio, width);
    /* `in` is interleaved; step by numChannels to stay on one channel. */
    value = in[i * stream->numChannels] * weight;
    oldSign = getSign(total);
    total += value;
    if (oldSign != getSign(total) && getSign(value) == oldSign) {
      /* We must have overflowed. This can happen with a sinc filter. */
      overflowCount += oldSign;
    }
  }
  /* It is better to clip than to wrap if there was a overflow. */
  if (overflowCount > 0) {
    return SHRT_MAX;
  } else if (overflowCount < 0) {
    return SHRT_MIN;
  }
  /* Discard the 16 fractional bits contributed by the fixed-point weights. */
  return total >> 16;
}
+
/* Change the rate.  Interpolate with a sinc FIR filter using a Hann window.
   Newly generated output (everything past originalNumOutputSamples) is moved
   into the pitch buffer and resampled back into the output buffer at the new
   rate.  Returns 0 on buffer-resize failure. */
static int adjustRate(sonicStream stream, float rate,
                      int originalNumOutputSamples) {
  int newSampleRate = stream->sampleRate / rate; /* Truncating division. */
  int oldSampleRate = stream->sampleRate;
  int numChannels = stream->numChannels;
  int position;
  short *in, *out;
  int i;
  int N = SINC_FILTER_POINTS;

  /* Set these values to help with the integer math */
  while (newSampleRate > (1 << 14) || oldSampleRate > (1 << 14)) {
    newSampleRate >>= 1;
    oldSampleRate >>= 1;
  }
  if (stream->numOutputSamples == originalNumOutputSamples) {
    return 1; /* Nothing new to resample. */
  }
  if (!moveNewSamplesToPitchBuffer(stream, originalNumOutputSamples)) {
    return 0;
  }
  /* Leave at least N pitch sample in the buffer */
  for (position = 0; position < stream->numPitchSamples - N; position++) {
    /* Emit output frames until the output clock catches up with the input
       clock at this position. */
    while ((stream->oldRatePosition + 1) * newSampleRate >
           stream->newRatePosition * oldSampleRate) {
      if (!enlargeOutputBufferIfNeeded(stream, 1)) {
        return 0;
      }
      out = stream->outputBuffer + stream->numOutputSamples * numChannels;
      in = stream->pitchBuffer + position * numChannels;
      for (i = 0; i < numChannels; i++) {
        *out++ = interpolate(stream, in, oldSampleRate, newSampleRate);
        in++;
      }
      stream->newRatePosition++;
      stream->numOutputSamples++;
    }
    stream->oldRatePosition++;
    if (stream->oldRatePosition == oldSampleRate) {
      /* Both clocks completed a full cycle; rewind to keep products small. */
      stream->oldRatePosition = 0;
      stream->newRatePosition = 0;
    }
  }
  removePitchSamples(stream, position);
  return 1;
}
+
+/* Skip over a pitch period. Return the number of output samples. */
+static int skipPitchPeriod(sonicStream stream, short* samples, float speed,
+ int period) {
+ long newSamples;
+ int numChannels = stream->numChannels;
+
+ if (speed >= 2.0f) {
+ /* For speeds >= 2.0, we skip over a portion of each pitch period rather
+ than dropping whole pitch periods. */
+ newSamples = period / (speed - 1.0f);
+ } else {
+ newSamples = period;
+ }
+ if (!enlargeOutputBufferIfNeeded(stream, newSamples)) {
+ return 0;
+ }
+ overlapAdd(newSamples, numChannels,
+ stream->outputBuffer + stream->numOutputSamples * numChannels,
+ samples, samples + period * numChannels);
+ stream->numOutputSamples += newSamples;
+ return newSamples;
+}
+
+/* Insert a pitch period, and determine how much input to copy directly. */
+static int insertPitchPeriod(sonicStream stream, short* samples, float speed,
+ int period) {
+ long newSamples;
+ short* out;
+ int numChannels = stream->numChannels;
+
+ if (speed <= 0.5f) {
+ newSamples = period * speed / (1.0f - speed);
+ } else {
+ newSamples = period;
+ }
+ if (!enlargeOutputBufferIfNeeded(stream, period + newSamples)) {
+ return 0;
+ }
+ out = stream->outputBuffer + stream->numOutputSamples * numChannels;
+ memcpy(out, samples, period * sizeof(short) * numChannels);
+ out =
+ stream->outputBuffer + (stream->numOutputSamples + period) * numChannels;
+ overlapAdd(newSamples, numChannels, out, samples + period * numChannels,
+ samples);
+ stream->numOutputSamples += period + newSamples;
+ return newSamples;
+}
+
/* PICOLA copies input to output until the total output samples == consumed
   input samples * speed.  Returns 0 if the output buffer cannot be grown.
   NOTE(review): assumes speed != 1.0 -- the caller (changeSpeed) only
   invokes this for 0.5 < speed < 2.0 with speed away from 1.0; otherwise
   the (speed - 1.0) divisor below would be zero. */
static int copyUnmodifiedSamples(sonicStream stream, short* samples,
                                 float speed, int position, int* newSamples) {
  int availableSamples = stream->numInputSamples - position;
  /* How many input frames to pass through before timeError returns to 0. */
  float inputToCopyFloat =
      1 - stream->timeError * speed / (stream->samplePeriod * (speed - 1.0));

  *newSamples = inputToCopyFloat > availableSamples ? availableSamples
                                                    : (int)inputToCopyFloat;
  if (!copyToOutput(stream, samples, *newSamples)) {
    return 0;
  }
  /* Passing samples through unmodified moves timeError back toward zero. */
  stream->timeError +=
      *newSamples * stream->samplePeriod * (speed - 1.0) / speed;
  return 1;
}
+
/* Resample as many pitch periods as we have buffered on the input. Return 0 if
   we fail to resize an input or output buffer.  Alternates between PICOLA
   pass-through copying and pitch-period insertion/removal, as directed by
   timeError, until less than maxRequired input remains. */
static int changeSpeed(sonicStream stream, float speed) {
  short* samples;
  int numSamples = stream->numInputSamples;
  int position = 0, period, newSamples;
  int maxRequired = stream->maxRequired;

  /* Not enough input buffered yet to analyze a full pitch period. */
  if (stream->numInputSamples < maxRequired) {
    return 1;
  }
  do {
    samples = stream->inputBuffer + position * stream->numChannels;
    if ((speed > 1.0f && speed < 2.0f && stream->timeError < 0.0f) ||
        (speed < 1.0f && speed > 0.5f && stream->timeError > 0.0f)) {
      /* Deal with the case where PICOLA is still copying input samples to
         output unmodified, */
      if (!copyUnmodifiedSamples(stream, samples, speed, position,
                                 &newSamples)) {
        return 0;
      }
      position += newSamples;
    } else {
      /* We are in the remaining cases, either inserting/removing a pitch period
         for speed < 2.0X, or a portion of one for speed >= 2.0X. */
      period = findPitchPeriod(stream, samples, 1);
#ifdef SONIC_SPECTROGRAM
      if (stream->spectrogram != NULL) {
        sonicAddPitchPeriodToSpectrogram(stream->spectrogram, samples, period,
                                         stream->numChannels);
        newSamples = period;
        position += period;
      } else
#endif /* SONIC_SPECTROGRAM */
      if (speed > 1.0) {
        newSamples = skipPitchPeriod(stream, samples, speed, period);
        position += period + newSamples;
        if (speed < 2.0) {
          /* Track the play-time error introduced by dropping a period. */
          stream->timeError += newSamples * stream->samplePeriod -
                               (period + newSamples) * stream->inputPlayTime /
                                   stream->numInputSamples;
        }
      } else {
        newSamples = insertPitchPeriod(stream, samples, speed, period);
        position += newSamples;
        if (speed > 0.5) {
          /* Track the play-time error introduced by inserting a period. */
          stream->timeError +=
              (period + newSamples) * stream->samplePeriod -
              newSamples * stream->inputPlayTime / stream->numInputSamples;
        }
      }
      if (newSamples == 0) {
        return 0; /* Failed to resize output buffer */
      }
    }
  } while (position + maxRequired <= numSamples);
  removeInputSamples(stream, position);
  return 1;
}
+
+/* Resample as many pitch periods as we have buffered on the input. Return 0 if
+ we fail to resize an input or output buffer. Also scale the output by the
+ volume. */
+static int processStreamInput(sonicStream stream) {
+ int originalNumOutputSamples = stream->numOutputSamples;
+ float rate = stream->rate * stream->pitch;
+ float localSpeed;
+
+ if (stream->numInputSamples == 0) {
+ return 1;
+ }
+ localSpeed =
+ stream->numInputSamples * stream->samplePeriod / stream->inputPlayTime;
+ if (localSpeed > 1.00001 || localSpeed < 0.99999) {
+ changeSpeed(stream, localSpeed);
+ } else {
+ if (!copyInputToOutput(stream, stream->numInputSamples)) {
+ return 0;
+ }
+ }
+ if (rate != 1.0f) {
+ if (!adjustRate(stream, rate, originalNumOutputSamples)) {
+ return 0;
+ }
+ }
+ if (stream->volume != 1.0f) {
+ /* Adjust output volume. */
+ scaleSamples(
+ stream->outputBuffer + originalNumOutputSamples * stream->numChannels,
+ (stream->numOutputSamples - originalNumOutputSamples) *
+ stream->numChannels,
+ stream->volume);
+ }
+ return 1;
+}
+
+/* Write floating point data to the input buffer and process it. */
+int sonicWriteFloatToStream(sonicStream stream, const float* samples,
+ int numSamples) {
+ if (!addFloatSamplesToInputBuffer(stream, samples, numSamples)) {
+ return 0;
+ }
+ return processStreamInput(stream);
+}
+
+/* Simple wrapper around sonicWriteFloatToStream that does the short to float
+ conversion for you. */
+int sonicWriteShortToStream(sonicStream stream, const short* samples,
+ int numSamples) {
+ if (!addShortSamplesToInputBuffer(stream, samples, numSamples)) {
+ return 0;
+ }
+ return processStreamInput(stream);
+}
+
+/* Simple wrapper around sonicWriteFloatToStream that does the unsigned char to
+ float conversion for you. */
+int sonicWriteUnsignedCharToStream(sonicStream stream,
+ const unsigned char* samples,
+ int numSamples) {
+ if (!addUnsignedCharSamplesToInputBuffer(stream, samples, numSamples)) {
+ return 0;
+ }
+ return processStreamInput(stream);
+}
+
+/* This is a non-stream oriented interface to just change the speed of a sound
+ * sample */
+int sonicChangeFloatSpeed(float* samples, int numSamples, float speed,
+ float pitch, float rate, float volume,
+ int useChordPitch, int sampleRate, int numChannels) {
+ sonicStream stream = sonicCreateStream(sampleRate, numChannels);
+
+ sonicSetSpeed(stream, speed);
+ sonicSetPitch(stream, pitch);
+ sonicSetRate(stream, rate);
+ sonicSetVolume(stream, volume);
+ sonicWriteFloatToStream(stream, samples, numSamples);
+ sonicFlushStream(stream);
+ numSamples = sonicSamplesAvailable(stream);
+ sonicReadFloatFromStream(stream, samples, numSamples);
+ sonicDestroyStream(stream);
+ return numSamples;
+}
+
+/* This is a non-stream oriented interface to just change the speed of a sound
+ * sample */
+int sonicChangeShortSpeed(short* samples, int numSamples, float speed,
+ float pitch, float rate, float volume,
+ int useChordPitch, int sampleRate, int numChannels) {
+ sonicStream stream = sonicCreateStream(sampleRate, numChannels);
+
+ sonicSetSpeed(stream, speed);
+ sonicSetPitch(stream, pitch);
+ sonicSetRate(stream, rate);
+ sonicSetVolume(stream, volume);
+ sonicWriteShortToStream(stream, samples, numSamples);
+ sonicFlushStream(stream);
+ numSamples = sonicSamplesAvailable(stream);
+ sonicReadShortFromStream(stream, samples, numSamples);
+ sonicDestroyStream(stream);
+ return numSamples;
+}
\ No newline at end of file
diff --git a/app/src/main/cpp/sonic.h b/app/src/main/cpp/sonic.h
new file mode 100644
index 00000000..15e2abf0
--- /dev/null
+++ b/app/src/main/cpp/sonic.h
@@ -0,0 +1,307 @@
+//
+// Created by JIBUM HAN on 2026. 4. 11..
+//
+#ifndef SONIC_H_
+#define SONIC_H_
+
+/* Sonic library
+ Copyright 2010
+ Bill Cox
+ This file is part of the Sonic Library.
+
+ This file is licensed under the Apache 2.0 license.
+*/
+
+/*
+The Sonic Library implements a new algorithm invented by Bill Cox for the
+specific purpose of speeding up speech by high factors at high quality. It
+generates smooth speech at speed up factors as high as 6X, possibly more. It is
+also capable of slowing down speech, and generates high quality results
+regardless of the speed up or slow down factor. For speeding up speech by 2X or
+more, the following equation is used:
+
+ newSamples = period/(speed - 1.0)
+ scale = 1.0/newSamples;
+
+where period is the current pitch period, determined using AMDF or any other
+pitch estimator, and speed is the speedup factor. If the current position in
+the input stream is pointed to by "samples", and the current output stream
+position is pointed to by "out", then newSamples number of samples can be
+generated with:
+
+ out[t] = (samples[t]*(newSamples - t) + samples[t + period]*t)/newSamples;
+
+where t = 0 to newSamples - 1.
+
+For speed factors < 2X, the PICOLA algorithm is used. The above
+algorithm is first used to double the speed of one pitch period. Then, enough
+input is directly copied from the input to the output to achieve the desired
+speed up factor, where 1.0 < speed < 2.0. The amount of data copied is derived:
+
+ speed = (2*period + length)/(period + length)
+ speed*length + speed*period = 2*period + length
+ length(speed - 1) = 2*period - speed*period
+ length = period*(2 - speed)/(speed - 1)
+
+For slowing down speech where 0.5 < speed < 1.0, a pitch period is inserted into
+the output twice, and length of input is copied from the input to the output
+until the output desired speed is reached. The length of data copied is:
+
+ length = period*(speed - 0.5)/(1 - speed)
+
+For slow down factors below 0.5, no data is copied, and an algorithm
+similar to high speed factors is used.
+*/
+
+/* Uncomment this to use sin-wav based overlap add which in theory can improve
+ sound quality slightly, at the expense of lots of floating point math. */
+/* #define SONIC_USE_SIN */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef SONIC_INTERNAL
+/* The following #define's are used to change the names of the routines defined
+ * here so that a new library (i.e. speedy) can reuse these names, and then call
+ * the original names. We do this for two reasons: 1) we don't want to change
+ * the original API, and 2) we want to add a shim, using the original names and
+ * still call these routines.
+ *
+ * Original users of this API and the libsonic library need to do nothing. The
+ * original behavior remains.
+ *
+ * A new user that add some additional functionality above this library (a shim)
+ * should #define SONIC_INTERNAL before including this file, undefine all these
+ * symbols and call the sonicIntXXX functions directly.
+ */
+#define sonicCreateStream sonicIntCreateStream
+#define sonicDestroyStream sonicIntDestroyStream
+#define sonicWriteFloatToStream sonicIntWriteFloatToStream
+#define sonicWriteShortToStream sonicIntWriteShortToStream
+#define sonicWriteUnsignedCharToStream sonicIntWriteUnsignedCharToStream
+#define sonicReadFloatFromStream sonicIntReadFloatFromStream
+#define sonicReadShortFromStream sonicIntReadShortFromStream
+#define sonicReadUnsignedCharFromStream sonicIntReadUnsignedCharFromStream
+#define sonicFlushStream sonicIntFlushStream
+#define sonicSamplesAvailable sonicIntSamplesAvailable
+#define sonicGetSpeed sonicIntGetSpeed
+#define sonicSetSpeed sonicIntSetSpeed
+#define sonicGetPitch sonicIntGetPitch
+#define sonicSetPitch sonicIntSetPitch
+#define sonicGetRate sonicIntGetRate
+#define sonicSetRate sonicIntSetRate
+#define sonicGetVolume sonicIntGetVolume
+#define sonicSetVolume sonicIntSetVolume
+#define sonicGetQuality sonicIntGetQuality
+#define sonicSetQuality sonicIntSetQuality
+#define sonicGetSampleRate sonicIntGetSampleRate
+#define sonicSetSampleRate sonicIntSetSampleRate
+#define sonicGetNumChannels sonicIntGetNumChannels
+#define sonicGetUserData sonicIntGetUserData
+#define sonicSetUserData sonicIntSetUserData
+#define sonicSetNumChannels sonicIntSetNumChannels
+#define sonicChangeFloatSpeed sonicIntChangeFloatSpeed
+#define sonicChangeShortSpeed sonicIntChangeShortSpeed
+#define sonicEnableNonlinearSpeedup sonicIntEnableNonlinearSpeedup
+#define sonicSetDurationFeedbackStrength sonicIntSetDurationFeedbackStrength
+#define sonicComputeSpectrogram sonicIntComputeSpectrogram
+#define sonicGetSpectrogram sonicIntGetSpectrogram
+
+#endif /* SONIC_INTERNAL */
+
+/* This specifies the range of voice pitches we try to match.
+ Note that if we go lower than 65, we could overflow in findPitchInRange */
+#ifndef SONIC_MIN_PITCH
+#define SONIC_MIN_PITCH 65
+#endif /* SONIC_MIN_PITCH */
+#ifndef SONIC_MAX_PITCH
+#define SONIC_MAX_PITCH 400
+#endif /* SONIC_MAX_PITCH */
+
+/* The following values are used to clamp inputs such as speed to sane values.
+ */
+#define SONIC_MIN_VOLUME 0.01f
+#define SONIC_MAX_VOLUME 100.0f
+#define SONIC_MIN_SPEED 0.05f
+#define SONIC_MAX_SPEED 20.0f
+#define SONIC_MIN_PITCH_SETTING 0.05f
+#define SONIC_MAX_PITCH_SETTING 20.0f
+#define SONIC_MIN_RATE 0.05f
+#define SONIC_MAX_RATE 20.0f
+#define SONIC_MIN_SAMPLE_RATE 1000
+#define SONIC_MAX_SAMPLE_RATE 500000
+#define SONIC_MIN_CHANNELS 1
+#define SONIC_MAX_CHANNELS 32
+
+/* These are used to down-sample some inputs to improve speed */
+#define SONIC_AMDF_FREQ 4000
+
+struct sonicStreamStruct;
+typedef struct sonicStreamStruct* sonicStream;
+
+/* For all of the following functions, numChannels is multiplied by numSamples
+ to determine the actual number of values read or returned. */
+
+/* Create a sonic stream. Return NULL only if we are out of memory and cannot
+ allocate the stream. Set numChannels to 1 for mono, and 2 for stereo. */
+sonicStream sonicCreateStream(int sampleRate, int numChannels);
+/* Destroy the sonic stream. */
+void sonicDestroyStream(sonicStream stream);
+/* Attach user data to the stream. */
+void sonicSetUserData(sonicStream stream, void *userData);
+/* Retrieve user data attached to the stream. */
+void *sonicGetUserData(sonicStream stream);
+/* Use this to write floating point data to be speed up or down into the stream.
+ Values must be between -1 and 1. Return 0 if memory realloc failed,
+ otherwise 1 */
+int sonicWriteFloatToStream(sonicStream stream, const float* samples, int numSamples);
+/* Use this to write 16-bit data to be speed up or down into the stream.
+ Return 0 if memory realloc failed, otherwise 1 */
+int sonicWriteShortToStream(sonicStream stream, const short* samples, int numSamples);
+/* Use this to write 8-bit unsigned data to be speed up or down into the stream.
+ Return 0 if memory realloc failed, otherwise 1 */
+int sonicWriteUnsignedCharToStream(sonicStream stream, const unsigned char* samples,
+ int numSamples);
+/* Use this to read floating point data out of the stream. Sometimes no data
+ will be available, and zero is returned, which is not an error condition. */
+int sonicReadFloatFromStream(sonicStream stream, float* samples,
+ int maxSamples);
+/* Use this to read 16-bit data out of the stream. Sometimes no data will
+ be available, and zero is returned, which is not an error condition. */
+int sonicReadShortFromStream(sonicStream stream, short* samples,
+ int maxSamples);
+/* Use this to read 8-bit unsigned data out of the stream. Sometimes no data
+ will be available, and zero is returned, which is not an error condition. */
+int sonicReadUnsignedCharFromStream(sonicStream stream, unsigned char* samples,
+ int maxSamples);
+/* Force the sonic stream to generate output using whatever data it currently
+ has. No extra delay will be added to the output, but flushing in the middle
+ of words could introduce distortion. */
+int sonicFlushStream(sonicStream stream);
+/* Return the number of samples in the output buffer */
+int sonicSamplesAvailable(sonicStream stream);
+/* Get the speed of the stream. */
+float sonicGetSpeed(sonicStream stream);
+/* Set the speed of the stream. */
+void sonicSetSpeed(sonicStream stream, float speed);
+/* Get the pitch of the stream. */
+float sonicGetPitch(sonicStream stream);
+/* Set the pitch of the stream. */
+void sonicSetPitch(sonicStream stream, float pitch);
+/* Get the rate of the stream. */
+float sonicGetRate(sonicStream stream);
+/* Set the rate of the stream. */
+void sonicSetRate(sonicStream stream, float rate);
+/* Get the scaling factor of the stream. */
+float sonicGetVolume(sonicStream stream);
+/* Set the scaling factor of the stream. */
+void sonicSetVolume(sonicStream stream, float volume);
+/* Chord pitch is DEPRECATED. AFAIK, it was never used by anyone. These
+ functions still exist to avoid breaking existing code. */
+/* Get the chord pitch setting. */
+int sonicGetChordPitch(sonicStream stream);
+/* Set chord pitch mode on or off. Default is off. See the documentation
+ page for a description of this feature. */
+void sonicSetChordPitch(sonicStream stream, int useChordPitch);
+/* Get the quality setting. */
+int sonicGetQuality(sonicStream stream);
+/* Set the "quality". Default 0 is virtually as good as 1, but very much
+ * faster. */
+void sonicSetQuality(sonicStream stream, int quality);
+/* Get the sample rate of the stream. */
+int sonicGetSampleRate(sonicStream stream);
+/* Set the sample rate of the stream. This will drop any samples that have not
+ * been read. */
+void sonicSetSampleRate(sonicStream stream, int sampleRate);
+/* Get the number of channels. */
+int sonicGetNumChannels(sonicStream stream);
+/* Set the number of channels. This will drop any samples that have not been
+ * read. */
+void sonicSetNumChannels(sonicStream stream, int numChannels);
+/* This is a non-stream oriented interface to just change the speed of a sound
+ sample. It works in-place on the sample array, so there must be at least
+ speed*numSamples available space in the array. Returns the new number of
+ samples. */
+int sonicChangeFloatSpeed(float* samples, int numSamples, float speed,
+ float pitch, float rate, float volume,
+ int useChordPitch, int sampleRate, int numChannels);
+/* This is a non-stream oriented interface to just change the speed of a sound
+ sample. It works in-place on the sample array, so there must be at least
+ speed*numSamples available space in the array. Returns the new number of
+ samples. */
+int sonicChangeShortSpeed(short* samples, int numSamples, float speed,
+ float pitch, float rate, float volume,
+ int useChordPitch, int sampleRate, int numChannels);
+
+#ifdef SONIC_SPECTROGRAM
+/*
+This code generates high quality spectrograms from sound samples, using
+Time-Aliased-FFTs as described at:
+
+ https://github.com/waywardgeek/spectrogram
+
+Basically, two adjacent pitch periods are overlap-added to create a sound
+sample that accurately represents the speech sound at that moment in time.
+This set of samples is converted to a spectral line using an FFT, and the result
+is saved as a single spectral line at that moment in time. The resulting
+spectral lines vary in resolution (it is equal to the number of samples in the
+pitch period), and the spacing of spectral lines also varies (proportional to
+the number of samples in the pitch period).
+
+To generate a bitmap, linear interpolation is used to render the grayscale
+value at any particular point in time and frequency.
+*/
+
+#define SONIC_MAX_SPECTRUM_FREQ 5000
+
+struct sonicSpectrogramStruct;
+struct sonicBitmapStruct;
+typedef struct sonicSpectrogramStruct* sonicSpectrogram;
+typedef struct sonicBitmapStruct* sonicBitmap;
+
+/* sonicBitmap objects represent spectrograms as grayscale bitmaps where each
+ pixel is from 0 (black) to 255 (white). Bitmaps are rows*cols in size.
+ Rows are indexed top to bottom and columns are indexed left to right */
+struct sonicBitmapStruct {
+ unsigned char* data;
+ int numRows;
+ int numCols;
+};
+
+/* Enable computation of a spectrogram on the fly. */
+void sonicComputeSpectrogram(sonicStream stream);
+
+/* Get the spectrogram. */
+sonicSpectrogram sonicGetSpectrogram(sonicStream stream);
+
+/* Create an empty spectrogram. Called automatically if sonicComputeSpectrogram
+ has been called. */
+sonicSpectrogram sonicCreateSpectrogram(int sampleRate);
+
+/* Destroy the spectrogram. This is called automatically when calling
+ sonicDestroyStream. */
+void sonicDestroySpectrogram(sonicSpectrogram spectrogram);
+
+/* Convert the spectrogram to a bitmap. Caller must destroy bitmap when done. */
+sonicBitmap sonicConvertSpectrogramToBitmap(sonicSpectrogram spectrogram,
+ int numRows, int numCols);
+
+/* Destroy a bitmap returned by sonicConvertSpectrogramToBitmap. */
+void sonicDestroyBitmap(sonicBitmap bitmap);
+
+int sonicWritePGM(sonicBitmap bitmap, char* fileName);
+
+/* Add two pitch periods worth of samples to the spectrogram. There must be
+ 2*period samples. Time should advance one pitch period for each call to
+ this function. */
+void sonicAddPitchPeriodToSpectrogram(sonicSpectrogram spectrogram,
+ short* samples, int numSamples,
+ int numChannels);
+#endif /* SONIC_SPECTROGRAM */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* SONIC_H_ */
\ No newline at end of file
diff --git a/app/src/main/kotlin/bums/lunatic/launcher/LauncherActivity.kt b/app/src/main/kotlin/bums/lunatic/launcher/LauncherActivity.kt
index 72d5471e..b6414728 100644
--- a/app/src/main/kotlin/bums/lunatic/launcher/LauncherActivity.kt
+++ b/app/src/main/kotlin/bums/lunatic/launcher/LauncherActivity.kt
@@ -951,16 +951,26 @@ open class LauncherActivity : CommonActivity() {
private val messageListener = MessageClient.OnMessageReceivedListener { messageEvent ->
when (messageEvent.path) {
+ "/gesture/right",
"/gesture/next" -> {
// 시계에서 오른쪽으로 휘둘렀을 때: 예) 다음 배경화면으로 변경
+
+ }
+ "/gesture/left",
+ "/gesture/prev" -> {
+ onSwipeRight()
+ // 시계에서 왼쪽으로 휘둘렀을 때: 예) 앱 서랍 열기
+
+// showToast("시계 제스처: 앱 서랍")
+ }
+ "/gesture/down" ->{
+ onSwipeDown()
+ }
+ "/gesture/up"->{
val intent = Intent("ACTION_NEXT_WALLPAPER").setPackage(packageName)
sendBroadcast(intent)
showToast("시계 제스처: 다음 배경화면")
- }
- "/gesture/prev" -> {
- // 시계에서 왼쪽으로 휘둘렀을 때: 예) 앱 서랍 열기
- showAppDrawer()
- showToast("시계 제스처: 앱 서랍")
+// showAppDrawer()
}
}
}
diff --git a/app/src/main/kotlin/bums/lunatic/launcher/home/CompletedFilesFragment.kt b/app/src/main/kotlin/bums/lunatic/launcher/home/CompletedFilesFragment.kt
index f54ac029..275cf203 100644
--- a/app/src/main/kotlin/bums/lunatic/launcher/home/CompletedFilesFragment.kt
+++ b/app/src/main/kotlin/bums/lunatic/launcher/home/CompletedFilesFragment.kt
@@ -168,6 +168,8 @@ class CompletedFilesFragment : Fragment() {
loadFiles()
} else {
if (extVideos.contains(file.extension.lowercase())) {
+ trackFileAccess(file.name)
+ loadFiles()
val intent = Intent(requireContext(), PlayerActivity::class.java).apply {
putExtra("VIDEO_PATH", file.absolutePath)
}
diff --git a/app/src/main/kotlin/bums/lunatic/launcher/home/NeoRssActivity.kt b/app/src/main/kotlin/bums/lunatic/launcher/home/NeoRssActivity.kt
index bd7d1b27..d03a8480 100644
--- a/app/src/main/kotlin/bums/lunatic/launcher/home/NeoRssActivity.kt
+++ b/app/src/main/kotlin/bums/lunatic/launcher/home/NeoRssActivity.kt
@@ -152,7 +152,9 @@ open class NeoRssActivity : CommonActivity() {
//// showContents(R.id.close)
// }
// }
+
when (messageEvent.path) {
+ "/gesture/right",
"/gesture/next" -> {
when(currentFragment) {
is RssHome ->{
@@ -173,9 +175,12 @@ open class NeoRssActivity : CommonActivity() {
}
}
}
+ "/gesture/left",
"/gesture/prev" -> {
currentFragment.onRemoteLeft(false)
}
+ "/gesture/up" ->{}
+ "/gesture/down" ->{}
}
}
diff --git a/app/src/main/kotlin/bums/lunatic/launcher/player/NativePlayer.kt b/app/src/main/kotlin/bums/lunatic/launcher/player/NativePlayer.kt
index b79388e6..cc928607 100644
--- a/app/src/main/kotlin/bums/lunatic/launcher/player/NativePlayer.kt
+++ b/app/src/main/kotlin/bums/lunatic/launcher/player/NativePlayer.kt
@@ -24,6 +24,19 @@ class NativePlayer {
fun seekBy(sec: Double) = nativeSeekBy(nativeHandle, sec)
fun setSpeed(speed: Float) = nativeSetSpeed(nativeHandle, speed)
+ // Public wrappers around the native calls; both guard against a null (0) handle.
+ fun getDuration(): Double {
+ return if (nativeHandle != 0L) nativeGetDuration(nativeHandle) else 0.0
+ }
+
+ fun seekTo(sec: Double) {
+ if (nativeHandle != 0L) nativeSeekTo(nativeHandle, sec)
+ }
+
+ // JNI bridge declarations (implemented in the native player library).
+ private external fun nativeGetDuration(h: Long): Double
+ private external fun nativeSeekTo(h: Long, s: Double)
+
@Suppress("unused")
private fun onSubtitleTextDecoded(text: String) {
subtitleCallback?.invoke(text)
diff --git a/app/src/main/kotlin/bums/lunatic/launcher/player/PlayerActivity.kt b/app/src/main/kotlin/bums/lunatic/launcher/player/PlayerActivity.kt
index 460d66a0..9c96326f 100644
--- a/app/src/main/kotlin/bums/lunatic/launcher/player/PlayerActivity.kt
+++ b/app/src/main/kotlin/bums/lunatic/launcher/player/PlayerActivity.kt
@@ -93,6 +93,7 @@ class PlayerActivity : AppCompatActivity(), TextureView.SurfaceTextureListener {
onPreparedListener = {
runOnUiThread {
loadAvailableSubtitles()
+ startUIUpdateLoop()
if (allSubtitleTracks.size > 1) {
showSubtitleSelectionDialog()
} else {
@@ -114,6 +115,11 @@ class PlayerActivity : AppCompatActivity(), TextureView.SurfaceTextureListener {
setupGestures()
}
+ private lateinit var seekBar: android.widget.SeekBar
+ private lateinit var tvTime: TextView
+ private var uiUpdateJob: Job? = null
+ private var isUserSeeking = false
+
private fun setupUI() {
val root = FrameLayout(this).apply { setBackgroundColor(Color.BLACK) }
@@ -156,11 +162,80 @@ class PlayerActivity : AppCompatActivity(), TextureView.SurfaceTextureListener {
root.addView(gestureLayer)
root.addView(btnRotate, FrameLayout.LayoutParams(150, 150, Gravity.BOTTOM or Gravity.START).apply { setMargins(30,0,0,30) })
root.addView(btnHideVideo, FrameLayout.LayoutParams(150, 150, Gravity.BOTTOM or Gravity.END).apply { setMargins(0,0,30,30) })
+ val bottomControlLayout = android.widget.LinearLayout(this).apply {
+ orientation = android.widget.LinearLayout.HORIZONTAL
+ gravity = Gravity.CENTER_VERTICAL
+ setBackgroundColor(Color.parseColor("#99000000")) // 반투명 검은색 배경
+ setPadding(40, 20, 40, 20)
+ }
+
+ tvTime = TextView(this).apply {
+ setTextColor(Color.WHITE)
+ textSize = 16f
+ text = "00:00 / 00:00"
+ }
+
+ seekBar = android.widget.SeekBar(this).apply {
+ layoutParams = android.widget.LinearLayout.LayoutParams(0, -2, 1f).apply {
+ setMargins(30, 0, 30, 0)
+ }
+
+ setOnSeekBarChangeListener(object : android.widget.SeekBar.OnSeekBarChangeListener {
+ override fun onProgressChanged(bar: android.widget.SeekBar?, progress: Int, fromUser: Boolean) {
+ if (fromUser) {
+ val duration = nativePlayer?.getDuration() ?: 0.0
+ tvTime.text = "${formatTime(progress.toDouble())} / ${formatTime(duration)}"
+ }
+ }
+ override fun onStartTrackingTouch(bar: android.widget.SeekBar?) {
+ isUserSeeking = true // 사용자가 잡고 있을 때는 자동 업데이트 중지
+ }
+ override fun onStopTrackingTouch(bar: android.widget.SeekBar?) {
+ isUserSeeking = false
+ val targetSec = bar?.progress?.toDouble() ?: 0.0
+ nativePlayer?.seekTo(targetSec) // 원하는 위치로 이동 요청
+ }
+ })
+ }
+
+ bottomControlLayout.addView(tvTime)
+ bottomControlLayout.addView(seekBar)
+
+ root.addView(bottomControlLayout, FrameLayout.LayoutParams(-1, -2, Gravity.BOTTOM))
setContentView(root)
+
hideSystemUI()
}
+ // Formats a position in seconds as HH:MM:SS, or MM:SS when under one hour.
+ // Locale.US pins ASCII digits: the default-locale String.format would render
+ // locale-specific digit glyphs (e.g. Eastern Arabic numerals) on some devices.
+ private fun formatTime(seconds: Double): String {
+ val totalSec = seconds.toInt()
+ val h = totalSec / 3600
+ val m = (totalSec % 3600) / 60
+ val s = totalSec % 60
+ return if (h > 0) String.format(java.util.Locale.US, "%02d:%02d:%02d", h, m, s) else String.format(java.util.Locale.US, "%02d:%02d", m, s)
+ }
+
+ // Starts the periodic UI refresh loop for the seek bar and the time label.
+ private fun startUIUpdateLoop() {
+ uiUpdateJob?.cancel()
+ uiUpdateJob = CoroutineScope(Dispatchers.Main).launch {
+ while (isActive) {
+ if (isPlaying && !isUserSeeking) {
+ val currentPos = nativePlayer?.getCurrentPosition() ?: 0.0
+ val duration = nativePlayer?.getDuration() ?: 0.0
+
+ if (duration > 0) {
+ seekBar.max = duration.toInt()
+ seekBar.progress = currentPos.toInt()
+ tvTime.text = "${formatTime(currentPos)} / ${formatTime(duration)}"
+ }
+ }
+ delay(500) // refresh the UI every 0.5 s
+ }
+ }
+ }
+
override fun onSurfaceTextureAvailable(st: SurfaceTexture, w: Int, h: Int) {
val videoFile = File(videoPath)
if (videoFile.exists()) {
@@ -232,7 +307,7 @@ class PlayerActivity : AppCompatActivity(), TextureView.SurfaceTextureListener {
val rightDetector = GestureDetector(this, object : GestureDetector.SimpleOnGestureListener() {
override fun onLongPress(e: MotionEvent) { nativePlayer?.setSpeed(4.0f) }
override fun onSingleTapUp(e: MotionEvent): Boolean {
- nativePlayer?.seekBy(20.0)
+ nativePlayer?.seekBy(40.0)
return true
}
})
@@ -329,19 +404,31 @@ class PlayerActivity : AppCompatActivity(), TextureView.SurfaceTextureListener {
private fun cleanSubtitleText(text: String): String = text.replace(Regex("\\{.*?\\}"), "")
var lastSubTitle : String = ""
+
+ // 💡 클래스 상단에 인덱스 기억용 변수 하나만 추가해 주세요.
+ private var currentSubtitleIndex = 0
+
private fun startSubtitleSyncLoop() {
subtitleSyncJob?.cancel()
+
+ // 💡 자막이 아예 없다면 작업을 시작조차 하지 않음
+ if (externalSubtitles.isEmpty()) {
+ subtitleView.visibility = View.INVISIBLE
+ return
+ }
+
+ // 💡 Dispatchers.Main 대신 Default에서 연산하고 UI만 Main에서 갱신하는 것이 버퍼링 방지에 좋습니다.
subtitleSyncJob = CoroutineScope(Dispatchers.Main).launch {
while (isActive) {
if (isPlaying) {
val currentSec = nativePlayer?.getCurrentPosition() ?: 0.0
- val currentSub = externalSubtitles.find { currentSec in it.startSec..it.endSec }
+
+ // 💡 최적화된 인덱스 탐색 함수 호출
+ val currentSub = findSubtitleIndexed(currentSec)
if (currentSub != null) {
- // 💡 번역본이 존재하면 [번역본] + [줄바꿈] + [원본] 형태로 보여주거나, 번역본만 보여줍니다.
val displayText = if (currentSub.translatedText != null) {
"${currentSub.translatedText}\n${cleanSubtitleText(currentSub.text)}"
- // (원본이 보기 싫다면 그냥 currentSub.translatedText 만 넣으셔도 됩니다!)
} else {
cleanSubtitleText(currentSub.text)
}
@@ -355,11 +442,36 @@ class PlayerActivity : AppCompatActivity(), TextureView.SurfaceTextureListener {
subtitleView.visibility = View.INVISIBLE
}
}
- delay(100)
+ delay(200)
}
}
}
+ /**
+ * Index-based lookup: resumes scanning from the last matched position, so the
+ * per-tick CPU cost stays near zero for forward playback.
+ */
+ private fun findSubtitleIndexed(currentSec: Double): SubtitleBlock? {
+ // 1. Reset the cached index if playback was rewound (or the index is stale).
+ if (currentSubtitleIndex >= externalSubtitles.size ||
+ externalSubtitles[currentSubtitleIndex].startSec > currentSec) {
+ currentSubtitleIndex = 0
+ }
+
+ // 2. Scan forward from the last found position (currentSubtitleIndex).
+ for (i in currentSubtitleIndex until externalSubtitles.size) {
+ val item = externalSubtitles[i]
+
+ if (currentSec in item.startSec..item.endSec) {
+ currentSubtitleIndex = i // remember where we found it
+ return item
+ }
+
+ // 3. Subtitles are sorted by start time, so stop once starts pass the current time.
+ if (item.startSec > currentSec) break
+ }
+ return null
+ }
+
private fun readTextWithEncoding(file: File): String {
val bytes = file.readBytes()
@@ -450,6 +562,7 @@ class PlayerActivity : AppCompatActivity(), TextureView.SurfaceTextureListener {
} catch (e: Exception) {
Log.e("PlayerActivity", "Subtitle parsing error", e)
}
+ result.sortBy { it.startSec }
return result
}
@@ -561,5 +674,6 @@ class PlayerActivity : AppCompatActivity(), TextureView.SurfaceTextureListener {
nativePlayer?.destroy()
leftLongPressJob?.cancel()
subtitleSyncJob?.cancel()
+ uiUpdateJob?.cancel()
}
}
\ No newline at end of file
diff --git a/lun_launcher/src/main/java/bums/lunatic/launcher/tile/MainTileService.kt b/lun_launcher/src/main/java/bums/lunatic/launcher/tile/MainTileService.kt
index e0306a63..ca833a28 100644
--- a/lun_launcher/src/main/java/bums/lunatic/launcher/tile/MainTileService.kt
+++ b/lun_launcher/src/main/java/bums/lunatic/launcher/tile/MainTileService.kt
@@ -4,6 +4,7 @@ import android.app.PendingIntent
import android.content.BroadcastReceiver
import android.content.Context
import android.content.Intent
+import android.util.Log
import androidx.wear.protolayout.ActionBuilders
import androidx.wear.protolayout.ColorBuilders.argb
import androidx.wear.protolayout.DimensionBuilders.dp
@@ -38,7 +39,39 @@ class MainTileService : SuspendingTileService() {
override suspend fun tileRequest(
requestParams: RequestBuilders.TileRequest
- ) = tile(requestParams, this)
+ ): TileBuilders.Tile {
+ // Read the id (message path) of the clickable the user just tapped.
+ val clickedPath = requestParams.currentState.lastClickableId
+
+ // If a button was tapped, forward its path to every connected node (the phone launcher).
+ if (clickedPath.isNotEmpty()) {
+ Log.d("MainTileService", "Clicked path: $clickedPath")
+
+ Wearable.getNodeClient(this).connectedNodes.addOnSuccessListener { nodes ->
+ for (node in nodes) {
+ Wearable.getMessageClient(this).sendMessage(node.id, clickedPath, null)
+ }
+ }
+ }
+
+ // Render the tile UI.
+ val singleTileTimeline = TimelineBuilders.Timeline.Builder()
+ .addTimelineEntry(
+ TimelineBuilders.TimelineEntry.Builder()
+ .setLayout(
+ LayoutElementBuilders.Layout.Builder()
+ .setRoot(tileLayout(requestParams, this))
+ .build()
+ )
+ .build()
+ )
+ .build()
+
+ return TileBuilders.Tile.Builder()
+ .setResourcesVersion(RESOURCES_VERSION)
+ .setTileTimeline(singleTileTimeline)
+ .build()
+ }
}
private fun resources(
@@ -73,57 +106,56 @@ private fun tile(
private fun tileLayout(requestParams: RequestBuilders.TileRequest, context: Context): LayoutElementBuilders.LayoutElement {
- fun createNavButton(label: String, path: String,x : Float, y :Float): LayoutElementBuilders.LayoutElement {
- // 리시버를 실행하기 위한 PendingIntent 생성
- val intent = Intent(context, TileActionReceiver::class.java).apply {
- putExtra("path", path)
- }
- val pendingIntent = PendingIntent.getBroadcast(
- context, path.hashCode(), intent,
- PendingIntent.FLAG_UPDATE_CURRENT or PendingIntent.FLAG_IMMUTABLE
- )
-
+ // 버튼 생성 함수 (크기를 48dp로 살짝 줄여서 화면에 쏙 들어가게 맞춤)
+ fun createNavButton(label: String, path: String): LayoutElementBuilders.LayoutElement {
return LayoutElementBuilders.Box.Builder()
.setModifiers(ModifiersBuilders.Modifiers.Builder()
.setClickable(ModifiersBuilders.Clickable.Builder()
- .setOnClick(ActionBuilders.LaunchAction.Builder()
- .setAndroidActivity(ActionBuilders.AndroidActivity.Builder()
- .setPackageName(context.packageName)
- .setClassName(TileActionReceiver::class.java.name) // 리시버 호출
- .build())
- .build())
- .build())
- .setTransformation(ModifiersBuilders.Transformation.Builder()
- // 방향에 따라 x, y 좌표 조절 (상: 0,-45 / 하: 0,45 / 좌: -45,0 / 우: 45,0)
- .setTranslationX(dp(x))
- .setTranslationY(dp(y))
+ .setId(path)
+ .setOnClick(ActionBuilders.LoadAction.Builder().build())
.build())
.setBackground(ModifiersBuilders.Background.Builder()
.setColor(argb(0xFF303030.toInt()))
- .setCorner(ModifiersBuilders.Corner.Builder().setRadius(dp(15f)).build())
+ .setCorner(ModifiersBuilders.Corner.Builder().setRadius(dp(24f)).build()) // 원형에 가깝게
.build())
.build())
- .setWidth(dp(56f)).setHeight(dp(56f))
- .addContent(Text.Builder(context, label).build())
+ .setWidth(dp(48f)).setHeight(dp(48f))
+ .addContent(
+ Text.Builder(context, label)
+ .setTypography(Typography.TYPOGRAPHY_CAPTION2) // 글자 크기 최적화
+ .setColor(argb(0xFFFFFFFF.toInt()))
+ .build()
+ )
.build()
}
- return PrimaryLayout.Builder(requestParams.deviceConfiguration)
- .setContent(
- LayoutElementBuilders.Box.Builder()
- // 상 (UP): X는 그대로, Y만 위로(-45dp)
- .addContent(createNavButton("UP", "/gesture/up", 0f, -50f))
+ // 1층: 제일 위에 UP 버튼 하나만
+ val topRow = LayoutElementBuilders.Row.Builder()
+ .addContent(createNavButton("UP", "/gesture/up"))
+ .build()
- // 하 (DOWN): X는 그대로, Y만 아래로(+50dp)
- .addContent(createNavButton("DOWN", "/gesture/down", 0f, 50f))
+ // 2층: 중간에 LEFT, 빈 공간, RIGHT 배치
+ val middleRow = LayoutElementBuilders.Row.Builder()
+ .addContent(createNavButton("LEFT", "/gesture/left"))
+ // 좌우 버튼 사이의 널찍한 빈 공간 (UP/DOWN 버튼이 들어갈 간격)
+ .addContent(LayoutElementBuilders.Spacer.Builder().setWidth(dp(56f)).build())
+ .addContent(createNavButton("RIGHT", "/gesture/right"))
+ .build()
- // 좌 (LEFT): X를 왼쪽으로(-50dp), Y는 그대로
- .addContent(createNavButton("LEFT", "/gesture/left", -50f, 0f))
+ // 3층: 제일 아래에 DOWN 버튼 하나만
+ val bottomRow = LayoutElementBuilders.Row.Builder()
+ .addContent(createNavButton("DOWN", "/gesture/down"))
+ .build()
- // 우 (RIGHT): X를 오른쪽으로(+50dp), Y는 그대로
- .addContent(createNavButton("RIGHT", "/gesture/right", 50f, 0f))
- .build()
- ).build()
+ // 전체를 세로로 정렬하여 합체
+ return LayoutElementBuilders.Column.Builder()
+ .setHorizontalAlignment(LayoutElementBuilders.HORIZONTAL_ALIGN_CENTER) // 가운데 정렬 필수
+ .addContent(topRow)
+ .addContent(LayoutElementBuilders.Spacer.Builder().setHeight(dp(8f)).build()) // 1~2층 간격
+ .addContent(middleRow)
+ .addContent(LayoutElementBuilders.Spacer.Builder().setHeight(dp(8f)).build()) // 2~3층 간격
+ .addContent(bottomRow)
+ .build()
}