forked from icd/rentgen

Compare commits


No commits in common. "7513594b0058d6382ebf1a49be953e131fdaae26" and "655b3b01ff9fa61fdb0e6eabedd07d02591f9dbb" have entirely different histories.

5 changed files with 178 additions and 81 deletions

View File

@@ -1,4 +1,4 @@
import { init, getMemory } from "./memory";
import { init } from "./memory";
// Use global browser object directly (available in extension context)
declare const browser: any;
@@ -7,25 +7,23 @@ declare const ENABLE_TESTS: boolean;
init();
// Test verification handler for Marionette tests
// Tests real Rentgen functionality: counting third-party domains
// This proves the background script is executing and can communicate with content scripts
if (ENABLE_TESTS) {
browser.runtime.onMessage.addListener((message: any, sender: any, sendResponse: any) => {
if (message.type === 'RENTGEN_TEST_VERIFICATION') {
// Get the origin from message (sent by content script)
const origin = message.origin;
// Perform a computation to prove the background script is running
// This is not just an echo - we're doing actual processing
const inputValue = message.inputValue || 0;
const computed = (inputValue * 2) + 3;
// Access the memory to get clusters for this origin
const memory = getMemory();
const clusters = memory.getClustersForOrigin(origin);
const badgeCount = Object.keys(clusters).length;
// Send back the badge count (number of third-party domains)
// Send back a response with computed value and metadata
const response = {
success: true,
badgeCount: badgeCount,
origin: origin,
clusterIds: Object.keys(clusters),
backgroundTimestamp: Date.now()
computed: computed,
formula: `(${inputValue} * 2) + 3 = ${computed}`,
backgroundTimestamp: Date.now(),
receivedFrom: message.url || 'unknown',
originalInput: inputValue
};
sendResponse(response);

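Read as a whole, this hunk swaps the memory/cluster lookup for a deterministic computation, so the verification no longer depends on what the tracker-counting code happens to have observed on a given page. A reconstruction of the post-change handler, pieced together from the lines above (a sketch, not a verbatim copy of the file):

declare const browser: any;            // global in the extension context, as in the diff
declare const ENABLE_TESTS: boolean;   // substituted at build time (see the shell script below)

if (ENABLE_TESTS) {
  browser.runtime.onMessage.addListener((message: any, _sender: any, sendResponse: any) => {
    if (message.type === 'RENTGEN_TEST_VERIFICATION') {
      // Real processing, not an echo: proves the background script executes.
      const inputValue = message.inputValue || 0;
      const computed = (inputValue * 2) + 3;
      sendResponse({
        success: true,
        computed: computed,
        formula: `(${inputValue} * 2) + 3 = ${computed}`,
        backgroundTimestamp: Date.now(),
        receivedFrom: message.url || 'unknown',
        originalInput: inputValue
      });
    }
  });
}

The response is sent synchronously, so the listener does not need to return true to keep the message channel open.
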
View File

@@ -27,34 +27,33 @@ document.addEventListener('rentgen_test_request', async (event) => {
// Extract test data from event
const testData = event.detail || {};
const inputValue = testData.value || 42;
const timestamp = testData.timestamp || Date.now();
// Send message to background script to get badge count for this origin
// This tests real Rentgen functionality: counting third-party domains
// Send message to background script and wait for response
// This proves background script is running and responsive
const response = await browser.runtime.sendMessage({
type: 'RENTGEN_TEST_VERIFICATION',
origin: window.location.origin,
inputValue: inputValue,
timestamp: timestamp,
url: window.location.href,
title: document.title,
timestamp: timestamp
title: document.title
});
// Store the response from background in DOM
// This provides the badge count (number of third-party domains)
// This provides undeniable proof of bidirectional communication
if (response && response.success) {
document.body.setAttribute('data-rentgen-verified', 'true');
document.body.setAttribute('data-rentgen-badge-count', String(response.badgeCount));
document.body.setAttribute('data-rentgen-origin', response.origin);
document.body.setAttribute('data-rentgen-cluster-ids', JSON.stringify(response.clusterIds));
document.body.setAttribute('data-rentgen-computed', String(response.computed));
document.body.setAttribute('data-rentgen-formula', response.formula);
document.body.setAttribute('data-rentgen-background-timestamp', String(response.backgroundTimestamp));
// Also dispatch a custom event with the results
document.dispatchEvent(new CustomEvent('rentgen_test_complete', {
detail: {
success: true,
badgeCount: response.badgeCount,
origin: response.origin,
clusterIds: response.clusterIds,
computed: response.computed,
formula: response.formula,
backgroundTimestamp: response.backgroundTimestamp
}
}));

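On the page side, the content script remains a thin bridge: it waits for a rentgen_test_request CustomEvent, relays the payload to the background via runtime.sendMessage, and publishes the reply both as data-rentgen-* attributes on <body> and as a rentgen_test_complete event, so the harness can read the result without any extension-internal API. A condensed sketch of the post-change flow (attribute and event names as in the hunk above; not verbatim):

declare const browser: any; // global in the extension context

document.addEventListener('rentgen_test_request', async (event: any) => {
  const testData = event.detail || {};
  const inputValue = testData.value || 42;

  // Round-trip through the background script; a well-formed reply proves it is running.
  const response = await browser.runtime.sendMessage({
    type: 'RENTGEN_TEST_VERIFICATION',
    origin: window.location.origin,
    inputValue: inputValue,
    timestamp: testData.timestamp || Date.now(),
    url: window.location.href,
    title: document.title
  });

  if (response && response.success) {
    // Leave the proof where Marionette can poll for it.
    document.body.setAttribute('data-rentgen-verified', 'true');
    document.body.setAttribute('data-rentgen-computed', String(response.computed));
    document.body.setAttribute('data-rentgen-formula', response.formula);
    document.body.setAttribute('data-rentgen-background-timestamp', String(response.backgroundTimestamp));
    document.dispatchEvent(new CustomEvent('rentgen_test_complete', {
      detail: {
        success: true,
        computed: response.computed,
        formula: response.formula,
        backgroundTimestamp: response.backgroundTimestamp
      }
    }));
  }
});
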
View File

@@ -23,11 +23,11 @@ async function injectTestContentScript() {
}
/**
* Test that background script returns badge count correctly
* Tests real Rentgen functionality: counting third-party domains
* @returns {Promise<number|null>} - Badge count (number of third-party domains) or null on failure
* Test that background script performs computation correctly
* @param {number} testValue - Input value for computation
* @returns {Promise<number|null>} - Computed result or null on failure
*/
async function testBadgeCount() {
async function testBackgroundComputation(testValue) {
// Inject content script first
const injected = await injectTestContentScript();
if (!injected) {
@@ -36,19 +36,19 @@ async function testBadgeCount() {
// Dispatch test request to content script
document.dispatchEvent(new CustomEvent('rentgen_test_request', {
detail: { timestamp: Date.now() }
detail: { value: testValue, timestamp: Date.now() }
}));
// Wait for background response with badge count
// Wait for background response
return new Promise((resolve) => {
let attempts = 0;
const checkInterval = setInterval(() => {
attempts++;
const badgeCount = document.body.getAttribute('data-rentgen-badge-count');
const computed = document.body.getAttribute('data-rentgen-computed');
if (badgeCount !== null) {
if (computed) {
clearInterval(checkInterval);
resolve(parseInt(badgeCount));
resolve(parseInt(computed));
} else if (attempts > 50) {
clearInterval(checkInterval);
resolve(null);

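test-lib.js is the piece Marionette injects and calls. After this change, testBackgroundComputation dispatches the request with a caller-supplied value and polls the DOM for data-rentgen-computed instead of the old badge-count attribute. A sketch of the resulting helper (injectTestContentScript is the helper defined earlier in the same file, per the hunk header above; the polling interval is not visible in the hunk, so 100 ms is assumed):

declare function injectTestContentScript(): Promise<boolean>; // defined earlier in test-lib.js

async function testBackgroundComputation(testValue: number): Promise<number | null> {
  // Inject the test content script first
  const injected = await injectTestContentScript();
  if (!injected) {
    return null;
  }

  // Dispatch the test request; the content script relays it to the background.
  document.dispatchEvent(new CustomEvent('rentgen_test_request', {
    detail: { value: testValue, timestamp: Date.now() }
  }));

  // Poll <body> until the content script has written the background's answer.
  return new Promise((resolve) => {
    let attempts = 0;
    const checkInterval = setInterval(() => {
      attempts++;
      const computed = document.body.getAttribute('data-rentgen-computed');
      if (computed) {
        clearInterval(checkInterval);
        resolve(parseInt(computed, 10));
      } else if (attempts > 50) {
        clearInterval(checkInterval);
        resolve(null);
      }
    }, 100); // interval not shown in the hunk; 100 ms assumed
  });
}
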
View File

@@ -1,11 +1,9 @@
#!/usr/bin/env python3
"""
test_verify.py - Extension badge count verification test
test_verify.py - Minimal extension verification test
Verifies Rentgen's core functionality: counting third-party domains.
Tests two scenarios:
1. Site without third-party domains (news.ycombinator.com) badge = 0
2. Site with third-party domains (pudelek.pl) badge > 0
Verifies the extension background script is executing by testing
bidirectional communication with a simple addition operation.
"""
import sys
@@ -52,8 +50,8 @@ def start_webext():
return webext.pid
def test_badge_count():
"""Test badge count for sites with/without third-party domains. Returns (success, results_dict)."""
def test_addition():
"""Test background script via Marionette. Returns (success, result)."""
try:
from marionette_driver.marionette import Marionette
@@ -64,47 +62,32 @@ def test_badge_count():
client = Marionette(host='localhost', port=2828)
client.start_session()
# Navigate to any page (needed for content script injection)
client.navigate("https://example.com")
time.sleep(5)
# Test: background should compute (17 * 2) + 3 = 37
test_value = 17
expected = 37
# Load test library
test_lib_path = os.path.join(os.path.dirname(__file__), 'test-lib.js')
with open(test_lib_path, 'r') as f:
test_lib = f.read()
results = {}
# Test 1: Site without third-party domains
print(" Testing news.ycombinator.com (expected: 0 third-party domains)...")
client.navigate("https://news.ycombinator.com")
time.sleep(5) # Wait for page load and tracking detection
badge_count_hn = client.execute_script(
test_lib + "\nreturn testBadgeCount();",
# Execute test
result = client.execute_script(
test_lib + "\nreturn testBackgroundComputation(arguments[0]);",
script_args=[test_value],
script_timeout=10000
)
results['hn'] = badge_count_hn
print(f" → Badge count: {badge_count_hn}")
# Test 2: Site with third-party domains
print(" Testing pudelek.pl (expected: >0 third-party domains)...")
client.navigate("https://pudelek.pl")
time.sleep(10) # Wait longer for page load and tracking detection
badge_count_pudelek = client.execute_script(
test_lib + "\nreturn testBadgeCount();",
script_timeout=10000
)
results['pudelek'] = badge_count_pudelek
print(f" → Badge count: {badge_count_pudelek}")
client.close()
# Verify results
if badge_count_hn is None or badge_count_pudelek is None:
return False, "Test timed out or failed to get badge count"
if badge_count_hn == 0 and badge_count_pudelek > 0:
return True, results
if result == expected:
return True, expected
else:
return False, f"Unexpected results: HN={badge_count_hn} (expected 0), Pudelek={badge_count_pudelek} (expected >0)"
return False, result
except Exception as e:
return False, str(e)
@@ -124,24 +107,17 @@ def cleanup(xvfb_pid, webext_pid):
def main():
"""Main test."""
print("Starting Rentgen badge count verification test...")
print()
xvfb_pid = start_xvfb()
webext_pid = start_webext()
success, result = test_badge_count()
success, result = test_addition()
cleanup(xvfb_pid, webext_pid)
print()
if not success:
print(red(f"FAIL: {result}"))
print(red(f"FAIL: Expected 37, got {result}"))
return 1
print(f"PASS: Badge count test succeeded!")
print(f" - news.ycombinator.com: {result['hn']} third-party domains")
print(f" - pudelek.pl: {result['pudelek']} third-party domains")
return 0

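The Python harness drives all of this through Marionette's execute_script: it prepends the whole of test-lib.js to a one-line return statement and passes the input via script_args, which Marionette exposes to the evaluated script as arguments. In effect the browser evaluates something like the following (TypeScript-flavoured JavaScript; test_value is 17 and expected is 37, as in the diff):

// ...entire contents of test-lib.js, including testBackgroundComputation()...
return testBackgroundComputation(arguments[0]); // arguments[0] === 17, passed via script_args

The helper resolves to (17 * 2) + 3 = 37, which the Python side compares against expected; script_timeout=10000 caps how long the call may take.
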
tests/verify-enable-tests.sh (new executable file, 124 lines added)
View File

@@ -0,0 +1,124 @@
#!/bin/bash
# Verification script for ENABLE_TESTS functionality
# This script tests that the extension behaves correctly with and without ENABLE_TESTS

set -e

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[0;33m'
NC='\033[0m' # No Color

echo "==================================="
echo "ENABLE_TESTS Verification Script"
echo "==================================="
echo ""

# Function to run test and capture result
run_test() {
    local test_name="$1"
    echo "Running: $test_name"

    # Check if test-content-script.js exists in lib/tests/
    if [ -f "lib/tests/test-content-script.js" ]; then
        echo " ✓ test-content-script.js found in lib/tests/"
    else
        echo " ✗ test-content-script.js NOT found in lib/tests/"
    fi

    # Check if ENABLE_TESTS condition in background.js
    if grep -q 'if (false)' lib/background.js 2>/dev/null; then
        echo " ✓ Test code is disabled (if (false) found)"
    elif grep -q 'if (true)' lib/background.js 2>/dev/null; then
        echo " ✓ Test code is enabled (if (true) found)"
    else
        echo " ? Could not determine test code state"
    fi

    # If we had Docker working, we would run the actual test here
    # python3 tests/test_verify.py 2>&1 | tail -5
    # For now, we just check the build artifacts
    echo ""
}

# Clean previous builds
echo "Cleaning previous builds..."
rm -rf lib/
echo ""

# Test 1: Production build (without ENABLE_TESTS)
echo -e "${YELLOW}TEST 1: Production Build (without ENABLE_TESTS)${NC}"
echo "================================================"
npm run build > /dev/null 2>&1
run_test "Production Build"

# Expected:
# - lib/tests/ should NOT exist
# - background.js should have 'if (false)'
if [ ! -d "lib/tests" ] && grep -q 'if (false)' lib/background.js 2>/dev/null; then
    echo -e "${GREEN}✓ PASS: Production build correctly excludes test code${NC}"
else
    echo -e "${RED}✗ FAIL: Production build still contains test code${NC}"
fi
echo ""

# Clean for next test
rm -rf lib/

# Test 2: Test build (with ENABLE_TESTS=true)
echo -e "${YELLOW}TEST 2: Test Build (with ENABLE_TESTS=true)${NC}"
echo "============================================="
ENABLE_TESTS=true npm run build > /dev/null 2>&1
run_test "Test Build"

# Expected:
# - lib/tests/test-content-script.js should exist
# - background.js should have 'if (true)'
if [ -f "lib/tests/test-content-script.js" ] && grep -q 'if (true)' lib/background.js 2>/dev/null; then
    echo -e "${GREEN}✓ PASS: Test build correctly includes test code${NC}"
else
    echo -e "${RED}✗ FAIL: Test build missing test code${NC}"
fi
echo ""

# Summary
echo "==================================="
echo "SUMMARY"
echo "==================================="
echo ""

# Check both conditions for final verdict
PROD_OK=false
TEST_OK=false

# Re-test production build
rm -rf lib/
npm run build > /dev/null 2>&1
if [ ! -d "lib/tests" ] && grep -q 'if (false)' lib/background.js 2>/dev/null; then
    PROD_OK=true
fi

# Re-test test build
rm -rf lib/
ENABLE_TESTS=true npm run build > /dev/null 2>&1
if [ -f "lib/tests/test-content-script.js" ] && grep -q 'if (true)' lib/background.js 2>/dev/null; then
    TEST_OK=true
fi

if $PROD_OK && $TEST_OK; then
    echo -e "${GREEN}✓ SUCCESS: ENABLE_TESTS mechanism works correctly!${NC}"
    echo " - Production builds exclude test code"
    echo " - Test builds include test code"
    exit 0
else
    echo -e "${RED}✗ FAILURE: ENABLE_TESTS mechanism has issues${NC}"
    if ! $PROD_OK; then
        echo " - Production build problem detected"
    fi
    if ! $TEST_OK; then
        echo " - Test build problem detected"
    fi
    exit 1
fi
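
The grep checks above rely on ENABLE_TESTS being replaced with a literal when lib/background.js is produced, so the bundle ends up containing if (true) or if (false) rather than a runtime lookup. The diff does not show how the build does this; one hypothetical way to wire it up is a bundler define, sketched here with esbuild (entry and output paths are assumptions, not taken from the repository):

// build.ts - hypothetical sketch of an ENABLE_TESTS-aware build step, not the project's real build script
import { build } from 'esbuild';

const enableTests = process.env.ENABLE_TESTS === 'true';

build({
  entryPoints: ['src/background.ts'],   // assumed entry point
  outfile: 'lib/background.js',
  bundle: true,
  define: {
    // Substitutes the ENABLE_TESTS identifier with a literal, which is exactly
    // what the grep 'if (true)' / 'if (false)' checks in this script look for.
    ENABLE_TESTS: enableTests ? 'true' : 'false'
  }
}).catch(() => process.exit(1));

Run as ENABLE_TESTS=true npm run build (or plain npm run build for the production variant), matching the two cases this script exercises.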