Optimization of patterns in high-frequency operation scenarios
Optimizing patterns for high-frequency operation scenarios is key to improving the performance of JavaScript applications. These scenarios typically involve frequently triggered event handling, animation rendering, or data updates, which can easily lead to performance bottlenecks if not handled properly. By judiciously applying design patterns, computational overhead can be reduced, redundant rendering avoided, and resource scheduling optimized.
Throttling and Debouncing Patterns
High-frequency events such as `scroll`, `resize`, or `mousemove` require special handling. Debouncing ensures that, during a burst of continuous triggers, the handler runs only once — after the triggers stop:
/**
 * Wraps `fn` so that it only fires after `delay` ms of silence:
 * every new call cancels the previously scheduled one, so a burst of
 * calls results in a single trailing invocation.
 *
 * @param {Function} fn - handler to debounce
 * @param {number} delay - quiet period in milliseconds
 * @returns {Function} debounced wrapper (preserves `this` and arguments)
 */
function debounce(fn, delay) {
  let pending;
  return function (...args) {
    // Cancel the previously scheduled invocation, if any.
    clearTimeout(pending);
    // Arrow callback keeps the wrapper's `this` for fn.apply.
    pending = setTimeout(() => fn.apply(this, args), delay);
  };
}
// Usage example: log once the window size has been stable for 300ms.
const onResizeSettled = debounce(() => {
  console.log('Executed after window size stabilizes');
}, 300);
window.addEventListener('resize', onResizeSettled);
Throttling limits execution frequency, ensuring the handler runs at most once within a fixed time interval:
/**
 * Wraps `fn` so it executes at most once per `interval` ms; calls that
 * land inside the cool-down window are silently dropped.
 *
 * @param {Function} fn - handler to throttle
 * @param {number} interval - minimum gap between executions, in ms
 * @returns {Function} throttled wrapper (preserves `this` and arguments)
 */
function throttle(fn, interval) {
  let previous = 0;
  return function (...args) {
    const current = Date.now();
    // Still inside the cool-down window: drop this call.
    if (current - previous < interval) {
      return;
    }
    fn.apply(this, args);
    previous = current;
  };
}
// Usage example: sample the mouse position at most every 100ms.
const onMouseMove = throttle((e) => {
  console.log(`Mouse position: ${e.clientX},${e.clientY}`);
}, 100);
document.addEventListener('mousemove', onMouseMove);
Virtual List Rendering Pattern
For long list rendering, virtual list technology can be employed to render only elements within the visible area. The following implementation uses dynamic calculation:
/**
 * Virtual list: renders only the rows intersecting the container's
 * viewport, positioning each absolutely at its logical offset.
 *
 * Fixes over the original:
 * - The original set the CONTAINER's height to the full list height,
 *   which removes the scrollable overflow entirely (the container must
 *   keep its own size; a hidden spacer child carries the total height).
 * - The container needs `position: relative` so the absolutely
 *   positioned rows are offset against it, not an ancestor.
 * - One overscan row is added so a partially visible bottom row renders.
 */
class VirtualList {
  /**
   * @param {HTMLElement} container - scrollable viewport element
   * @param {number} itemHeight - fixed row height in pixels
   * @param {Function} renderItem - (item) => HTMLElement for one row
   */
  constructor(container, itemHeight, renderItem) {
    this.container = container;
    this.itemHeight = itemHeight;
    this.renderItem = renderItem;
    this.data = [];
    this.visibleItems = [];
    container.style.overflow = 'auto';
    // Rows use position:absolute; anchor them to the container.
    container.style.position = 'relative';
    // Invisible spacer whose height equals the full list, giving the
    // container a correctly sized scrollbar.
    this.spacer = document.createElement('div');
    container.appendChild(this.spacer);
    container.addEventListener('scroll', () => this.updateVisibleItems());
  }

  /** Replaces the backing data and re-renders the visible window. */
  setData(newData) {
    this.data = newData;
    this.spacer.style.height = `${this.data.length * this.itemHeight}px`;
    this.updateVisibleItems();
  }

  /** Recomputes the visible index window from the current scroll offset. */
  updateVisibleItems() {
    const scrollTop = this.container.scrollTop;
    const startIdx = Math.floor(scrollTop / this.itemHeight);
    // +1 overscan row covers a row clipped at the bottom edge.
    const endIdx = Math.min(
      startIdx + Math.ceil(this.container.clientHeight / this.itemHeight) + 1,
      this.data.length
    );
    this.visibleItems = this.data.slice(startIdx, endIdx);
    this.renderItems(startIdx);
  }

  /** Rebuilds the DOM for the visible rows, offset by `offset` rows. */
  renderItems(offset) {
    // innerHTML = '' also removes the spacer, so re-attach it first.
    this.container.innerHTML = '';
    this.container.appendChild(this.spacer);
    this.visibleItems.forEach((item, i) => {
      const element = this.renderItem(item);
      element.style.position = 'absolute';
      element.style.top = `${(offset + i) * this.itemHeight}px`;
      this.container.appendChild(element);
    });
  }
}
Object Pool Pattern
When objects are frequently created and destroyed, an object pool can significantly reduce GC pressure — well suited to scenarios such as particle systems:
/**
 * Generic object pool: recycles released objects instead of allocating
 * new ones, reducing garbage-collector pressure in hot paths.
 */
class ObjectPool {
  /**
   * @param {Function} createFn - produces a fresh object when the pool is empty
   * @param {Function} [resetFn] - restores a released object to a clean state
   */
  constructor(createFn, resetFn = (obj) => obj) {
    this.createFn = createFn;
    this.resetFn = resetFn;
    this.freeList = [];   // recycled objects ready for reuse
    this.activeCount = 0; // objects currently checked out
  }

  /** Returns a recycled object if one is available, otherwise a new one. */
  acquire() {
    // `??` (not the original `||`) so valid falsy pooled values
    // (0, '', false) are reused rather than leaked and recreated.
    const obj = this.freeList.pop() ?? this.createFn();
    this.activeCount++;
    return obj;
  }

  /**
   * Resets `obj` and returns it to the pool.
   * NOTE: double-releasing the same object is not guarded against.
   */
  release(obj) {
    this.resetFn(obj);
    this.freeList.push(obj);
    this.activeCount--;
  }
}
// Usage example
const particlePool = new ObjectPool(
  () => ({ x: 0, y: 0, alpha: 1 }),
  (p) => { p.alpha = 1; }
);

// Spawns 100 particles at random canvas positions; each one is handed
// back to the pool as soon as its animation finishes.
function createExplosion() {
  for (let i = 0; i < 100; i++) {
    const particle = particlePool.acquire();
    particle.x = Math.random() * canvas.width;
    particle.y = Math.random() * canvas.height;
    animateParticle(particle, () => particlePool.release(particle));
  }
}
Incremental Processing Pattern
Large datasets can be processed in chunks to avoid blocking the main thread:
/**
 * Processes `items` in chunks of `chunkSize`, scheduling successive
 * chunks on animation frames so the main thread stays responsive.
 *
 * @param {Array} items - elements to process in order
 * @param {number} chunkSize - max items handled per frame
 * @param {Function} processItem - called once per item
 * @param {Function} [callback] - invoked after the last item
 */
function processInChunks(items, chunkSize, processItem, callback) {
  let cursor = 0;
  const runChunk = () => {
    const limit = Math.min(cursor + chunkSize, items.length);
    for (; cursor < limit; cursor++) {
      processItem(items[cursor]);
    }
    if (cursor >= items.length) {
      callback?.();
    } else {
      requestAnimationFrame(runChunk);
    }
  };
  runChunk();
}
// Usage example: work through largeArray 100 items per frame.
processInChunks(largeArray, 100, (item) => {
  /* Process individual item */
}, () => console.log('Processing complete'));
State Batching Pattern
State updates in frameworks like React can be optimized through batching:
/**
 * Coalesces multiple partial state updates into a single
 * `component.setState` call, cutting down re-renders.
 *
 * Fixes over the original:
 * - `flush` cleared `this.pendingStates` right after passing React an
 *   updater that still read it; React invokes updaters asynchronously,
 *   so by then the queue was empty and the merge was a no-op. The queue
 *   is now snapshotted before being handed off.
 * - `batchUpdates` resets `isBatching` in `finally`, so a throwing
 *   callback cannot leave the batcher permanently stuck in batch mode.
 */
class BatchUpdater {
  /** @param {{setState: Function}} component - React-like component */
  constructor(component) {
    this.component = component;
    this.pendingStates = []; // queued partial-state objects
    this.isBatching = false;
  }

  /** Queues a partial update; flushes immediately unless batching. */
  setState(update) {
    this.pendingStates.push(update);
    if (!this.isBatching) {
      this.flush();
    }
  }

  /** Runs `callback`, deferring all flushes until it returns. */
  batchUpdates(callback) {
    this.isBatching = true;
    try {
      callback();
      this.flush();
    } finally {
      this.isBatching = false;
    }
  }

  /** Merges every queued update into one component.setState call. */
  flush() {
    if (this.pendingStates.length === 0) {
      return;
    }
    // Snapshot + clear BEFORE the handoff — the updater may run later.
    const updates = this.pendingStates;
    this.pendingStates = [];
    this.component.setState((prevState) =>
      updates.reduce((state, update) => ({ ...state, ...update }), prevState)
    );
  }
}
// Usage example
const updater = new BatchUpdater(component);
updater.batchUpdates(() => {
  updater.setState({ count: 1 });
  updater.setState({ loading: true });
  // Fix: the original wrote `.then(() => ... data: res ...)`, which
  // threw a ReferenceError — `res` must be the callback's parameter.
  fetchData().then((res) => {
    updater.setState({ data: res, loading: false });
  });
});
Cache Proxy Pattern
Compute-intensive operations can be optimized through caching proxies:
/**
 * Memoizing proxy: wraps `fn` so results are cached under a key derived
 * from the arguments (JSON-serialized by default) and repeated calls
 * return the cached value without recomputation.
 *
 * @param {Function} fn - expensive pure function to wrap
 * @param {Function} [keyFn] - maps the args array to a cache key
 * @returns {Function} caching wrapper (preserves `this`)
 */
function createCacheProxy(fn, keyFn = JSON.stringify) {
  const cache = new Map();
  return function (...args) {
    const key = keyFn(args);
    if (!cache.has(key)) {
      cache.set(key, fn.apply(this, args));
    }
    return cache.get(key);
  };
}
// Usage example
const heavyCompute = (n) => { /* Complex computation */ };
const cachedCompute = createCacheProxy(heavyCompute);
console.log(cachedCompute(5)); // First computation
console.log(cachedCompute(5)); // Returns cached result
Observer Pattern Optimization
Traditional observer patterns can incorporate change merging for high-frequency events:
/**
 * Observable that batches notifications: payloads delivered within one
 * frame are accumulated (deduplicated via Set) and every observer is
 * invoked once per animation frame with the merged payloads.
 *
 * Fix: `Set.prototype.add` accepts a single argument, so the original
 * `add(...arguments)` silently dropped every payload after the first.
 */
class OptimizedObservable {
  constructor() {
    this.observers = new Set();
    this.pendingNotifications = new Set(); // payloads queued for next frame
    this.notificationScheduled = false;
  }

  /** Registers an observer callback (duplicates are ignored by the Set). */
  addObserver(observer) {
    this.observers.add(observer);
  }

  /** Queues payloads and schedules one batched delivery per frame. */
  notifyObservers(...payloads) {
    for (const payload of payloads) {
      this.pendingNotifications.add(payload);
    }
    if (!this.notificationScheduled) {
      this.notificationScheduled = true;
      requestAnimationFrame(() => {
        const args = [...this.pendingNotifications];
        this.pendingNotifications.clear();
        this.observers.forEach((observer) => observer(...args));
        this.notificationScheduled = false;
      });
    }
  }
}
Lazy Loading Pattern
Resource-intensive modules can adopt on-demand loading strategies:
/**
 * On-demand module loader that caches the loading PROMISE per module,
 * so concurrent requests share a single in-flight import (the original
 * cached only after resolution, letting parallel calls duplicate the
 * fetch). Failed loads are evicted so a later call can retry.
 */
const lazyLoader = {
  loadedModules: new Map(), // moduleName -> Promise<module namespace>
  /**
   * @param {string} moduleName - file name under ./modules/ (no extension)
   * @returns {Promise<object>} resolves with the module namespace
   */
  load(moduleName) {
    let loading = this.loadedModules.get(moduleName);
    if (!loading) {
      loading = import(`./modules/${moduleName}.js`).catch((err) => {
        // Drop the failed entry so the import can be retried.
        this.loadedModules.delete(moduleName);
        throw err;
      });
      this.loadedModules.set(moduleName, loading);
    }
    return loading;
  }
};
// Usage example: defer loading the heavy module until the first click.
document.getElementById('feature-btn').addEventListener('click', () => {
  lazyLoader.load('expensiveFeature').then((module) => {
    module.init();
  });
});
Time Slicing Pattern
Breaking a long task into small slices that yield control back to the event loop between slices keeps the UI responsive:
/**
 * Runs an array of zero-argument task functions in time-boxed slices,
 * yielding to the event loop between slices so the UI can update.
 * A slice ends after ~16ms of work or `chunkSize` tasks, whichever
 * comes first.
 *
 * Fixes over the original:
 * - `chunkSize` was accepted but never used; it now caps tasks/slice.
 * - Yielding used `Promise.resolve().then` — a microtask, which runs
 *   before rendering and therefore never actually lets the UI update.
 *   A macrotask (`setTimeout`) genuinely yields between slices.
 * - Always returns a Promise (the original returned `undefined` when
 *   all tasks finished inside the first slice).
 *
 * @param {Array<Function>} task - tasks executed in order
 * @param {number} [chunkSize=10] - maximum tasks per slice
 * @returns {Promise<void>} resolves once every task has run
 */
function timeSlicedTask(task, chunkSize = 10) {
  let index = 0;
  const total = task.length;
  return new Promise((resolve) => {
    function nextChunk() {
      const start = performance.now();
      let done = 0;
      while (index < total && done < chunkSize && performance.now() - start < 16) {
        task[index++]();
        done++;
      }
      if (index < total) {
        setTimeout(nextChunk, 0); // macrotask: lets rendering happen
      } else {
        resolve();
      }
    }
    nextChunk();
  });
}
// Usage example: append 1000 list rows without freezing the page.
timeSlicedTask(
  Array.from({ length: 1000 }, (_, i) => () => {
    const element = document.createElement('div');
    element.textContent = `Item ${i}`;
    document.body.appendChild(element);
  })
);
本站部分内容来自互联网,一切版权均归源网站或源作者所有。
如果侵犯了你的权益请来信告知我们删除。邮箱:cc@cccx.cn
上一篇:垃圾回收机制对设计模式的影响
下一篇:移动端环境下的模式调整