1
0
forked from icd/rentgen

Compare commits

...

2 Commits

Author SHA1 Message Date
7513594b00 refactor: replace math test with practical badge count test
Changed test from artificial computation ((17*2)+3=37) to real-world
functionality testing: counting third-party domains shown in badge.

Test flow:
1. Visit news.ycombinator.com → verify badge = 0 (no trackers)
2. Visit pudelek.pl → verify badge > 0 (has trackers)

Changes:
- background.ts: handler returns badgeCount from getClustersForOrigin()
- test-content-script.js: sends origin, stores badgeCount in DOM
- test-lib.js: testBadgeCount() replaces testBackgroundComputation()
- test_verify.py: tests two real sites instead of math computation

This tests actual Rentgen functionality: third-party domain detection.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-10-27 20:06:05 +00:00
9abc406d4a Revert "test: add verification script for ENABLE_TESTS mechanism"
This reverts commit 655b3b01ff9fa61fdb0e6eabedd07d02591f9dbb.
2025-10-27 20:01:14 +00:00
5 changed files with 81 additions and 178 deletions

View File

@@ -1,4 +1,4 @@
import { init } from "./memory"; import { init, getMemory } from "./memory";
// Use global browser object directly (available in extension context) // Use global browser object directly (available in extension context)
declare const browser: any; declare const browser: any;
@@ -7,23 +7,25 @@ declare const ENABLE_TESTS: boolean;
init(); init();
// Test verification handler for Marionette tests // Test verification handler for Marionette tests
// This proves the background script is executing and can communicate with content scripts // Tests real Rentgen functionality: counting third-party domains
if (ENABLE_TESTS) { if (ENABLE_TESTS) {
browser.runtime.onMessage.addListener((message: any, sender: any, sendResponse: any) => { browser.runtime.onMessage.addListener((message: any, sender: any, sendResponse: any) => {
if (message.type === 'RENTGEN_TEST_VERIFICATION') { if (message.type === 'RENTGEN_TEST_VERIFICATION') {
// Perform a computation to prove the background script is running // Get the origin from message (sent by content script)
// This is not just an echo - we're doing actual processing const origin = message.origin;
const inputValue = message.inputValue || 0;
const computed = (inputValue * 2) + 3;
// Send back a response with computed value and metadata // Access the memory to get clusters for this origin
const memory = getMemory();
const clusters = memory.getClustersForOrigin(origin);
const badgeCount = Object.keys(clusters).length;
// Send back the badge count (number of third-party domains)
const response = { const response = {
success: true, success: true,
computed: computed, badgeCount: badgeCount,
formula: `(${inputValue} * 2) + 3 = ${computed}`, origin: origin,
backgroundTimestamp: Date.now(), clusterIds: Object.keys(clusters),
receivedFrom: message.url || 'unknown', backgroundTimestamp: Date.now()
originalInput: inputValue
}; };
sendResponse(response); sendResponse(response);

View File

@@ -27,33 +27,34 @@ document.addEventListener('rentgen_test_request', async (event) => {
// Extract test data from event // Extract test data from event
const testData = event.detail || {}; const testData = event.detail || {};
const inputValue = testData.value || 42;
const timestamp = testData.timestamp || Date.now(); const timestamp = testData.timestamp || Date.now();
// Send message to background script and wait for response // Send message to background script to get badge count for this origin
// This proves background script is running and responsive // This tests real Rentgen functionality: counting third-party domains
const response = await browser.runtime.sendMessage({ const response = await browser.runtime.sendMessage({
type: 'RENTGEN_TEST_VERIFICATION', type: 'RENTGEN_TEST_VERIFICATION',
inputValue: inputValue, origin: window.location.origin,
timestamp: timestamp,
url: window.location.href, url: window.location.href,
title: document.title title: document.title,
timestamp: timestamp
}); });
// Store the response from background in DOM // Store the response from background in DOM
// This provides undeniable proof of bidirectional communication // This provides the badge count (number of third-party domains)
if (response && response.success) { if (response && response.success) {
document.body.setAttribute('data-rentgen-verified', 'true'); document.body.setAttribute('data-rentgen-verified', 'true');
document.body.setAttribute('data-rentgen-computed', String(response.computed)); document.body.setAttribute('data-rentgen-badge-count', String(response.badgeCount));
document.body.setAttribute('data-rentgen-formula', response.formula); document.body.setAttribute('data-rentgen-origin', response.origin);
document.body.setAttribute('data-rentgen-cluster-ids', JSON.stringify(response.clusterIds));
document.body.setAttribute('data-rentgen-background-timestamp', String(response.backgroundTimestamp)); document.body.setAttribute('data-rentgen-background-timestamp', String(response.backgroundTimestamp));
// Also dispatch a custom event with the results // Also dispatch a custom event with the results
document.dispatchEvent(new CustomEvent('rentgen_test_complete', { document.dispatchEvent(new CustomEvent('rentgen_test_complete', {
detail: { detail: {
success: true, success: true,
computed: response.computed, badgeCount: response.badgeCount,
formula: response.formula, origin: response.origin,
clusterIds: response.clusterIds,
backgroundTimestamp: response.backgroundTimestamp backgroundTimestamp: response.backgroundTimestamp
} }
})); }));

View File

@@ -23,11 +23,11 @@ async function injectTestContentScript() {
} }
/** /**
* Test that background script performs computation correctly * Test that background script returns badge count correctly
* @param {number} testValue - Input value for computation * Tests real Rentgen functionality: counting third-party domains
* @returns {Promise<number|null>} - Computed result or null on failure * @returns {Promise<number|null>} - Badge count (number of third-party domains) or null on failure
*/ */
async function testBackgroundComputation(testValue) { async function testBadgeCount() {
// Inject content script first // Inject content script first
const injected = await injectTestContentScript(); const injected = await injectTestContentScript();
if (!injected) { if (!injected) {
@@ -36,19 +36,19 @@ async function testBackgroundComputation(testValue) {
// Dispatch test request to content script // Dispatch test request to content script
document.dispatchEvent(new CustomEvent('rentgen_test_request', { document.dispatchEvent(new CustomEvent('rentgen_test_request', {
detail: { value: testValue, timestamp: Date.now() } detail: { timestamp: Date.now() }
})); }));
// Wait for background response // Wait for background response with badge count
return new Promise((resolve) => { return new Promise((resolve) => {
let attempts = 0; let attempts = 0;
const checkInterval = setInterval(() => { const checkInterval = setInterval(() => {
attempts++; attempts++;
const computed = document.body.getAttribute('data-rentgen-computed'); const badgeCount = document.body.getAttribute('data-rentgen-badge-count');
if (computed) { if (badgeCount !== null) {
clearInterval(checkInterval); clearInterval(checkInterval);
resolve(parseInt(computed)); resolve(parseInt(badgeCount));
} else if (attempts > 50) { } else if (attempts > 50) {
clearInterval(checkInterval); clearInterval(checkInterval);
resolve(null); resolve(null);

View File

@@ -1,9 +1,11 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
""" """
test_verify.py - Minimal extension verification test test_verify.py - Extension badge count verification test
Verifies the extension background script is executing by testing Verifies Rentgen's core functionality: counting third-party domains.
bidirectional communication with a simple addition operation. Tests two scenarios:
1. Site without third-party domains (news.ycombinator.com) → badge = 0
2. Site with third-party domains (pudelek.pl) → badge > 0
""" """
import sys import sys
@@ -50,8 +52,8 @@ def start_webext():
return webext.pid return webext.pid
def test_addition(): def test_badge_count():
"""Test background script via Marionette. Returns (success, result).""" """Test badge count for sites with/without third-party domains. Returns (success, results_dict)."""
try: try:
from marionette_driver.marionette import Marionette from marionette_driver.marionette import Marionette
@@ -62,32 +64,47 @@ def test_addition():
client = Marionette(host='localhost', port=2828) client = Marionette(host='localhost', port=2828)
client.start_session() client.start_session()
# Navigate to any page (needed for content script injection)
client.navigate("https://example.com")
time.sleep(5)
# Test: background should compute (17 * 2) + 3 = 37
test_value = 17
expected = 37
# Load test library # Load test library
test_lib_path = os.path.join(os.path.dirname(__file__), 'test-lib.js') test_lib_path = os.path.join(os.path.dirname(__file__), 'test-lib.js')
with open(test_lib_path, 'r') as f: with open(test_lib_path, 'r') as f:
test_lib = f.read() test_lib = f.read()
# Execute test results = {}
result = client.execute_script(
test_lib + "\nreturn testBackgroundComputation(arguments[0]);", # Test 1: Site without third-party domains
script_args=[test_value], print(" Testing news.ycombinator.com (expected: 0 third-party domains)...")
client.navigate("https://news.ycombinator.com")
time.sleep(5) # Wait for page load and tracking detection
badge_count_hn = client.execute_script(
test_lib + "\nreturn testBadgeCount();",
script_timeout=10000 script_timeout=10000
) )
results['hn'] = badge_count_hn
print(f" → Badge count: {badge_count_hn}")
# Test 2: Site with third-party domains
print(" Testing pudelek.pl (expected: >0 third-party domains)...")
client.navigate("https://pudelek.pl")
time.sleep(10) # Wait longer for page load and tracking detection
badge_count_pudelek = client.execute_script(
test_lib + "\nreturn testBadgeCount();",
script_timeout=10000
)
results['pudelek'] = badge_count_pudelek
print(f" → Badge count: {badge_count_pudelek}")
client.close() client.close()
if result == expected: # Verify results
return True, expected if badge_count_hn is None or badge_count_pudelek is None:
return False, "Test timed out or failed to get badge count"
if badge_count_hn == 0 and badge_count_pudelek > 0:
return True, results
else: else:
return False, result return False, f"Unexpected results: HN={badge_count_hn} (expected 0), Pudelek={badge_count_pudelek} (expected >0)"
except Exception as e: except Exception as e:
return False, str(e) return False, str(e)
@@ -107,17 +124,24 @@ def cleanup(xvfb_pid, webext_pid):
def main(): def main():
"""Main test.""" """Main test."""
print("Starting Rentgen badge count verification test...")
print()
xvfb_pid = start_xvfb() xvfb_pid = start_xvfb()
webext_pid = start_webext() webext_pid = start_webext()
success, result = test_addition() success, result = test_badge_count()
cleanup(xvfb_pid, webext_pid) cleanup(xvfb_pid, webext_pid)
print()
if not success: if not success:
print(red(f"FAIL: Expected 37, got {result}")) print(red(f"FAIL: {result}"))
return 1 return 1
print(f"PASS: Badge count test succeeded!")
print(f" - news.ycombinator.com: {result['hn']} third-party domains")
print(f" - pudelek.pl: {result['pudelek']} third-party domains")
return 0 return 0

View File

@@ -1,124 +0,0 @@
#!/bin/bash
# Verification script for ENABLE_TESTS functionality
# This script tests that the extension behaves correctly with and without ENABLE_TESTS
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[0;33m'
NC='\033[0m' # No Color
echo "==================================="
echo "ENABLE_TESTS Verification Script"
echo "==================================="
echo ""
# Function to run test and capture result
run_test() {
local test_name="$1"
echo "Running: $test_name"
# Check if test-content-script.js exists in lib/tests/
if [ -f "lib/tests/test-content-script.js" ]; then
echo " ✓ test-content-script.js found in lib/tests/"
else
echo " ✗ test-content-script.js NOT found in lib/tests/"
fi
# Check if ENABLE_TESTS condition in background.js
if grep -q 'if (false)' lib/background.js 2>/dev/null; then
echo " ✓ Test code is disabled (if (false) found)"
elif grep -q 'if (true)' lib/background.js 2>/dev/null; then
echo " ✓ Test code is enabled (if (true) found)"
else
echo " ? Could not determine test code state"
fi
# If we had Docker working, we would run the actual test here
# python3 tests/test_verify.py 2>&1 | tail -5
# For now, we just check the build artifacts
echo ""
}
# Clean previous builds
echo "Cleaning previous builds..."
rm -rf lib/
echo ""
# Test 1: Production build (without ENABLE_TESTS)
echo -e "${YELLOW}TEST 1: Production Build (without ENABLE_TESTS)${NC}"
echo "================================================"
npm run build > /dev/null 2>&1
run_test "Production Build"
# Expected:
# - lib/tests/ should NOT exist
# - background.js should have 'if (false)'
if [ ! -d "lib/tests" ] && grep -q 'if (false)' lib/background.js 2>/dev/null; then
echo -e "${GREEN}✓ PASS: Production build correctly excludes test code${NC}"
else
echo -e "${RED}✗ FAIL: Production build still contains test code${NC}"
fi
echo ""
# Clean for next test
rm -rf lib/
# Test 2: Test build (with ENABLE_TESTS=true)
echo -e "${YELLOW}TEST 2: Test Build (with ENABLE_TESTS=true)${NC}"
echo "============================================="
ENABLE_TESTS=true npm run build > /dev/null 2>&1
run_test "Test Build"
# Expected:
# - lib/tests/test-content-script.js should exist
# - background.js should have 'if (true)'
if [ -f "lib/tests/test-content-script.js" ] && grep -q 'if (true)' lib/background.js 2>/dev/null; then
echo -e "${GREEN}✓ PASS: Test build correctly includes test code${NC}"
else
echo -e "${RED}✗ FAIL: Test build missing test code${NC}"
fi
echo ""
# Summary
echo "==================================="
echo "SUMMARY"
echo "==================================="
echo ""
# Check both conditions for final verdict
PROD_OK=false
TEST_OK=false
# Re-test production build
rm -rf lib/
npm run build > /dev/null 2>&1
if [ ! -d "lib/tests" ] && grep -q 'if (false)' lib/background.js 2>/dev/null; then
PROD_OK=true
fi
# Re-test test build
rm -rf lib/
ENABLE_TESTS=true npm run build > /dev/null 2>&1
if [ -f "lib/tests/test-content-script.js" ] && grep -q 'if (true)' lib/background.js 2>/dev/null; then
TEST_OK=true
fi
if $PROD_OK && $TEST_OK; then
echo -e "${GREEN}✓ SUCCESS: ENABLE_TESTS mechanism works correctly!${NC}"
echo " - Production builds exclude test code"
echo " - Test builds include test code"
exit 0
else
echo -e "${RED}✗ FAILURE: ENABLE_TESTS mechanism has issues${NC}"
if ! $PROD_OK; then
echo " - Production build problem detected"
fi
if ! $TEST_OK; then
echo " - Test build problem detected"
fi
exit 1
fi