Shared log counter #2

Open · wants to merge 9 commits into `master`
5 changes: 3 additions & 2 deletions README.md
@@ -3,8 +3,9 @@
A simple and efficient logging server designed to run inside the host machine to stream logs from the AWS Nitro Enclave.

1. GET `/logs/stream` -- Creates an SSE connection that the client can listen to for realtime logs
2. GET `/logs/history?log_id=50&offset=10` -- Responds with the logs starting with `offset` number of logs starting from log id = `log_id-1`
3. GET `/logs` -- Server html file in response to both stream and view log history out of the box
2. GET `/logs/history?log_id=50&offset=10` -- Responds with `offset` logs, counting back from log id `log_id-1`. Omitting the `log_id` param returns the latest `offset` logs.
3. GET `/logs/tail-log-id` -- Responds with the id of the last received log
4. GET `/logs` -- Renders an HTML page that both streams logs in realtime and retrieves existing logs (see the usage sketch below).
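
A minimal client sketch for these endpoints, assuming the server is reachable at `http://localhost:516` (the port used by `assets/logs.html`); adjust the host and port for your deployment:

```js
// Hypothetical client usage; the base URL below is an assumption for illustration.
const BASE = "http://localhost:516";

// Stream realtime logs over SSE.
const source = new EventSource(`${BASE}/logs/stream`);
source.onmessage = (event) => console.log("live:", event.data);

// Fetch the id of the last received log, then page backwards through history.
fetch(`${BASE}/logs/tail-log-id`)
  .then((res) => res.json())
  .then(({ log_id }) => fetch(`${BASE}/logs/history?log_id=${log_id}&offset=10`))
  .then((res) => res.json())
  .then((history) => console.log("last 10 logs:", history));
```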

### Building the Server

61 changes: 45 additions & 16 deletions assets/logs.html
@@ -29,20 +29,22 @@
line-height: 2em;
"
>
<div
<li
id="loader"
style="display: none; text-align: center; margin-top: 20px"
>
Loading...
</div>
</li>
</ul>
</body>
<script defer>
const PORT = 516;
const LOG_OFFSET = 10;
const NUMBERS = "0123456789";
const logs = document.getElementById("logs");
const loader = document.getElementById("loader");
let isHistoryView = false;
let autoScrollToBottom = true;
let lastScrollTop = 0;

const stripLogPrefix = (log) => {
const parts = log.split("] ");
@@ -53,29 +55,39 @@
};

const historyLogs = (url, logId) => {
fetch(`${url}/logs/history?log_id=${logId}&offset=${LOG_OFFSET}`)
// If a logId is provided, fetch the history entries preceding it; otherwise fetch the latest logs
const params = logId
? new URLSearchParams({ log_id: logId, offset: LOG_OFFSET })
: new URLSearchParams({ offset: LOG_OFFSET });
fetch(`${url}/logs/history?${params}`)
.then((response) => response.json())
.then((data) => {
const logData = data.reverse();
logData.forEach((log, index) => {
let newLog = stripLogPrefix(log);
let newLogId = Number(logId) - (index + 1);
let messageElement = document.createElement("li");
messageElement.id = newLogId;
messageElement.id = Number(newLog?.id || generateRandomString());
messageElement.textContent = newLog.message;
logs.prepend(messageElement);
});
// After prepending history logs, keep the scroll position near the top instead of jumping to the bottom
logs.scrollTop = 0;
isHistoryView = true;
logs.scrollTop = 20;
});
};

function generateRandomString(length = 6) {
let result = "";
for (let i = 0; i < length; i++) {
result += NUMBERS.charAt(Math.floor(Math.random() * NUMBERS.length));
}
return result;
}

const startLogs = (url) => {
historyLogs(url);
let eventSource = new EventSource(`${url}/logs/stream`);

eventSource.onmessage = async (event) => {
if(loader){
if (loader) {
loader.remove();
}
let log = stripLogPrefix(event.data);
@@ -84,28 +96,45 @@
messageElement.id = log.id;
messageElement.textContent = message;
logs.appendChild(messageElement);
// Scroll to the bottom after adding new logs to ensure the user sees the latest logs
if(!isHistoryView){
logs.scrollTop += messageElement.offsetHeight;
if (autoScrollToBottom) {
logs.scrollTop = logs.scrollHeight;
}
};

eventSource.onerror = (error) => {
console.error("EventSource failed:", error);
eventSource.close();
logs.removeChild(loader);
if(loader){
loader.remove()
if (loader) {
loader.remove();
}
};
};

logs.addEventListener("scroll", () => {
if (logs.scrollTop === 0) {
const scrollTop = logs.scrollTop;
const scrollHeight = logs.scrollHeight;
const clientHeight = logs.clientHeight;

// If the user scrolls upwards, stop auto-scrolling to the bottom
if (scrollTop < lastScrollTop) {
autoScrollToBottom = false;
}

// If at the top, fetch older history logs
if (scrollTop === 0) {
let firstLog = document.getElementById("logs").firstElementChild;
let firstLogId = firstLog.id;
autoScrollToBottom = false;
historyLogs(`http://${window.location.hostname}:${PORT}`, firstLogId);
}

// If scrolled back to the bottom, resume auto-scrolling
if (scrollTop + clientHeight >= scrollHeight) {
autoScrollToBottom = true;
}

lastScrollTop = scrollTop;
});

startLogs(`http://${window.location.hostname}:${PORT}`);
14 changes: 9 additions & 5 deletions src/enclave_monitor.rs
@@ -2,20 +2,23 @@ use crate::logging::log_message;
use anyhow::{bail, Context};
use serde_json::Value;
use std::process::Stdio;
use std::sync::Arc;
use tokio::io::{AsyncBufReadExt, AsyncWriteExt, BufReader};
use tokio::process::Command;
use tokio::sync::broadcast;
use tokio::sync::{broadcast, Mutex};

pub async fn monitor_and_capture_logs(
sse_tx: &broadcast::Sender<String>,
enclave_log_file_path: &str,
script_log_file_path: &str,
target_cid: u64,
log_counter: Arc<Mutex<u64>>,
) -> anyhow::Result<()> {
loop {
let enclave_id = wait_for_enclave_with_cid(target_cid)
.await
.context("Error in wait for enclave with cid call")?;
let log_counter = log_counter.clone();
log_message(
script_log_file_path,
&format!(
@@ -29,6 +32,7 @@ pub async fn monitor_and_capture_logs(
&enclave_id,
enclave_log_file_path,
script_log_file_path,
log_counter,
)
.await
{
@@ -76,15 +80,14 @@ async fn capture_logs(
enclave_id: &str,
enclave_log_file_path: &str,
script_log_file_path: &str,
log_counter: Arc<Mutex<u64>>,
) -> anyhow::Result<()> {
let mut child = Command::new("nitro-cli")
.args(["console", "--enclave-id", enclave_id])
.stdout(Stdio::piped())
.spawn()
.context("Failed to spawn nitro-cli process")?;

let mut log_id_counter: u64 = 0;

// Open the file asynchronously
let mut file = tokio::fs::OpenOptions::new()
.create(true)
@@ -97,7 +100,8 @@
let mut reader = BufReader::new(stdout).lines();

while let Some(line) = reader.next_line().await? {
let log_entry = format!("[{}] {}", log_id_counter, line);
let mut log_counter = log_counter.lock().await;
let log_entry = format!("[{}] {}", *log_counter, line);

{
file.write_all(log_entry.as_bytes()).await?;
@@ -109,7 +113,7 @@
println!("No active SSE subscribers, skipping log transmission.");
}

log_id_counter += 1;
*log_counter += 1;
}

let status = child.wait().await?;
27 changes: 24 additions & 3 deletions src/http_server.rs
@@ -1,10 +1,11 @@
use anyhow::{anyhow, Context};
use serde_json::json;
use std::collections::HashMap;
use std::convert::Infallible;
use std::{collections::HashMap, sync::Arc};
use tokio::{
fs::File,
io::{AsyncBufReadExt, BufReader},
sync::Mutex,
};
use warp::{http::Method, Filter};

@@ -50,24 +51,40 @@ pub async fn fetch_logs_with_offset(
pub fn create_routes(
enclave_log_file_path: String,
sse_tx: tokio::sync::broadcast::Sender<String>,
log_counter: Arc<Mutex<u64>>,
) -> impl Filter<Extract = impl warp::Reply, Error = warp::Rejection> + Clone {
let logs_file = enclave_log_file_path.clone();
let log_counter1 = log_counter.clone();
let log_counter2 = log_counter.clone();
let home_html = include_str!("../assets/logs.html");

let home_route = warp::path("logs")
.and(warp::get())
.map(move || warp::reply::html(home_html));

let tail_log_route = warp::path("logs")
.and(warp::path("tail-log-id"))
.and(warp::get())
.and_then(move || {
let log_counter = log_counter1.clone();
async move {
let latest_log_id = log_counter.lock().await;
Ok::<_, Infallible>(warp::reply::json(&json!({"log_id": *latest_log_id})))
}
});

let history_route = warp::path("logs")
.and(warp::path("history"))
.and(warp::query::<HashMap<String, String>>())
.and_then(move |params: HashMap<String, String>| {
let logs_file = logs_file.clone();
let log_counter = log_counter2.clone();
async move {
let latest_log_id = log_counter.lock().await;
let log_id = params
.get("log_id")
.and_then(|id| id.parse::<u64>().ok())
.unwrap_or(1);
.unwrap_or(*latest_log_id);

let offset = params
.get("offset")
@@ -127,5 +144,9 @@ pub fn create_routes(
Method::HEAD,
]);

history_route.or(sse_route).or(home_route).with(cors)
history_route
.or(tail_log_route)
.or(sse_route)
.or(home_route)
.with(cors)
}
23 changes: 18 additions & 5 deletions src/main.rs
@@ -3,16 +3,19 @@ mod enclave_monitor;
mod http_server;
mod logging;

use std::sync::Arc;

use anyhow::Context;
use args::Args;
use clap::Parser;
use enclave_monitor::monitor_and_capture_logs;
use logging::{clear_log_file, log_message};
use tokio::sync::broadcast;
use tokio::sync::{broadcast, Mutex};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
let args = Args::parse();
let log_counter: Arc<Mutex<u64>> = Arc::new(Mutex::new(0));

clear_log_file(&args.enclave_log_file_path)
.await
@@ -30,11 +33,17 @@ async fn main() -> anyhow::Result<()> {
let script_log_file = args.script_log_file_path.clone();
let enclave_log_file = args.enclave_log_file_path.clone();
let target_cid = args.target_cid;
let log_counter = Arc::clone(&log_counter);

tokio::task::spawn(async move {
if let Err(e) =
monitor_and_capture_logs(&sse_tx, &enclave_log_file, &script_log_file, target_cid)
.await
if let Err(e) = monitor_and_capture_logs(
&sse_tx,
&enclave_log_file,
&script_log_file,
target_cid,
log_counter,
)
.await
{
// Ensure you await the async function
let _ = log_message(
@@ -46,7 +55,11 @@
});
}

let routes = http_server::create_routes(args.enclave_log_file_path.clone(), sse_tx.clone());
let routes = http_server::create_routes(
args.enclave_log_file_path.clone(),
sse_tx.clone(),
log_counter,
);

log_message(
&args.script_log_file_path,