This repository was archived by the owner on Jun 30, 2022. It is now read-only.

Commit f84b9d9

charlesccychen authored and aaltay committed
Undo introduction of OperationCounters.should_sample
Regression in performance suggests we need to revise our approach.

----Release Notes----
[]
-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=123439180
1 parent 2b670d8 commit f84b9d9

2 files changed: 11 additions & 113 deletions

google/cloud/dataflow/worker/opcounters.py (11 additions, 80 deletions)

@@ -15,109 +15,40 @@
 """Counters collect the progress of the Worker for reporting to the service."""
 
 from __future__ import absolute_import
-import math
-import random
 
-from google.cloud.dataflow.coders import WindowedValueCoder
-from google.cloud.dataflow.transforms.window import WindowedValue
 from google.cloud.dataflow.utils.counters import Counter
 
 
 class OperationCounters(object):
   """The set of basic counters to attach to an Operation."""
 
   def __init__(self, counter_factory, step_name, coder, output_index):
-    self._counter_factory = counter_factory
     self.element_counter = counter_factory.get_counter(
         '%s-out%d-ElementCount' % (step_name, output_index), Counter.SUM)
     self.mean_byte_counter = counter_factory.get_counter(
         '%s-out%d-MeanByteCount' % (step_name, output_index), Counter.MEAN)
     self.coder = coder
-    self._active_accumulators = []
-    self._sample_counter = 0
-    self._next_sample = 0
 
-  def update_from(self, windowed_value, coder=None):
+  def update_from(self, windowed_value, coder=None):  # pylint: disable=unused-argument
     """Add one value to this counter."""
     self.element_counter.update(1)
-    if self.should_sample():
-      byte_size_accumulator = self._counter_factory.get_counter(
-          '%s-temp%d' % (self.mean_byte_counter.name, self._sample_counter),
-          Counter.SUM)
-      self._active_accumulators.append(byte_size_accumulator)
-      # Shuffle operations may pass in their own coder
-      if coder is None:
-        coder = self.coder
-      # Some Readers and Writers return windowed values even
-      # though their output encoding does not claim to be windowed.
-      # TODO(ccy): fix output encodings to be consistent here
-      if (isinstance(windowed_value, WindowedValue)
-          and not isinstance(coder, WindowedValueCoder)):
-        coder = WindowedValueCoder(coder)
-      # TODO(gildea):
-      # Actually compute the encoded size of this value:
-      # coder.store_estimated_size(windowed_value, byte_size_accumulator)
+    # TODO(silviuc): Implement estimated size sampling.
+    # TODO(gildea):
+    # Actually compute the encoded size of this value.
+    # In spirit, something like this:
+    # if coder is None:
+    #   coder = self.coder
+    # coder.store_estimated_size(windowed_value, byte_size_accumulator)
+    # but will need to do sampling.
 
   def update_collect(self):
     """Collects the accumulated size estimates.
 
     Now that the element has been processed, we ask our accumulator
     for the total and store the result in a counter.
     """
-    for pending in self._active_accumulators:
-      self.mean_byte_counter.update(pending.value())
-    self._active_accumulators = []
-
-  def should_sample(self):
-    """Determines whether to sample the next element.
-
-    Size calculation can be expensive, so we don't do it for each element.
-    Because we need only an estimate of average size, we sample.
-
-    We always sample the first 10 elements, then the sampling rate
-    is approximately 10/N. After reading N elements, of the next N,
-    we will sample approximately 10*ln(2) (about 7) elements.
-
-    This algorithm samples at the same rate as Reservoir Sampling, but
-    it never throws away early results. (Because we keep only a
-    running accumulation, storage is not a problem, so there is no
-    need to discard earlier calculations.)
-
-    Because we accumulate and do not replace, our statistics are
-    biased toward early data. If the data are distributed uniformly,
-    this is not a problem. If the data change over time (i.e., the
-    element size tends to grow or shrink over time), our estimate will
-    show the bias. We could correct this by giving weight N to each
-    sample, since each sample is a stand-in for the N/(10*ln(2))
-    samples around it, which is proportional to N. Since we do not
-    expect biased data, for efficiency we omit the extra multiplication.
-    We could reduce the early-data bias by putting a lower bound on
-    the sampling rate.
-
-    Computing random.randint(1, self._sample_counter) for each element
-    is too slow, so when the sample size is big enough (we estimate 30
-    is big enough), we estimate the size of the gap after each sample.
-    This estimation allows us to call random much less often.
-
-    Returns:
-      True if it is time to compute another element's size.
-    """
-    def compute_next_sample(i):
-      # https://en.wikipedia.org/wiki/Reservoir_sampling#Fast_Approximation
-      gap = math.log(1.0 - random.random()) / math.log(1.0 - 10.0/i)
-      return i + math.floor(gap)
-
-    self._sample_counter += 1
-    if self._next_sample == 0:
-      if random.randint(1, self._sample_counter) <= 10:
-        if self._sample_counter > 30:
-          self._next_sample = compute_next_sample(self._sample_counter)
-        return True
-      return False
-    elif self._sample_counter >= self._next_sample:
-      self._next_sample = compute_next_sample(self._sample_counter)
-      return True
-    return False
+    # TODO(silviuc): Implement estimated size sampling.
+    pass
 
   def __str__(self):
     return '<%s [%s]>' % (self.__class__.__name__,
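
For context, the sampling scheme this commit backs out can be read as a standalone sketch. Element N is sampled with probability about 10/N (the first 10 elements always), so of the next N elements after the first N it samples roughly sum(10/i) ≈ 10*ln(2) ≈ 7, and a whole stream of N elements costs only about 10 + 10*ln(N/10) size computations. The code below restates the removed logic outside the class; make_should_sample and its parameter names are hypothetical illustrations, not SDK API:

from __future__ import division, print_function

import math
import random


def make_should_sample(always_sample=10, gap_threshold=30):
  """Returns a should_sample() closure mirroring the removed method.

  The first `always_sample` elements are always sampled; element i is then
  sampled with probability ~always_sample/i. Once more than `gap_threshold`
  elements have been seen, the index of the next sample is drawn directly
  (the reservoir-sampling "fast approximation"), so the random module is
  consulted once per sample instead of once per element.
  """
  state = {'count': 0, 'next_sample': 0}

  def compute_next_sample(i):
    # Draw the geometric gap until the next sampled element.
    # https://en.wikipedia.org/wiki/Reservoir_sampling#Fast_Approximation
    gap = math.log(1.0 - random.random()) / math.log(1.0 - always_sample / i)
    return i + math.floor(gap)

  def should_sample():
    state['count'] += 1
    if state['next_sample'] == 0:
      # Exact regime: one random draw per element.
      if random.randint(1, state['count']) <= always_sample:
        if state['count'] > gap_threshold:
          state['next_sample'] = compute_next_sample(state['count'])
        return True
      return False
    elif state['count'] >= state['next_sample']:
      # Gap regime: this element's index was precomputed; draw the next gap.
      state['next_sample'] = compute_next_sample(state['count'])
      return True
    return False

  return should_sample


should_sample = make_should_sample()
print(sum(1 for _ in range(100000) if should_sample()))  # ~10 + 10*ln(10000), around 100

Note that even in the gap regime should_sample() still runs once per element; the approximation only amortizes the calls into random, which may be part of why the approach regressed performance and is being undone here rather than tuned.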

google/cloud/dataflow/worker/opcounters_test.py (0 additions, 33 deletions)

@@ -15,7 +15,6 @@
 """Tests for worker counters."""
 
 import logging
-import random
 import unittest
 
 from google.cloud.dataflow import coders
@@ -92,38 +91,6 @@ def test_update_multiple(self):
     opcounts.update_collect()
     self.verify_counters(opcounts, 3)
 
-  def test_should_sample(self):
-    # Order of magnitude more buckets than highest constant in code under test.
-    buckets = [0] * 300
-    # The seed is arbitrary and exists just to ensure this test is robust.
-    # If you don't like this seed, try your own; the test should still pass.
-    random.seed(1717)
-    # Do enough runs that the expected hits even in the last buckets
-    # is big enough to expect some statistical smoothing.
-    total_runs = 10 * len(buckets)
-
-    # Fill the buckets.
-    for _ in xrange(total_runs):
-      opcounts = OperationCounters(CounterFactory(), 'some-name',
-                                   coders.PickleCoder(), 0)
-      for i in xrange(len(buckets)):
-        if opcounts.should_sample():
-          buckets[i] += 1
-
-    # Look at the buckets to see if they are likely.
-    for i in xrange(10):
-      self.assertEqual(total_runs, buckets[i])
-    for i in xrange(10, len(buckets)):
-      self.assertTrue(buckets[i] > 7 * total_runs / i,
-                      'i=%d, buckets[i]=%d, expected=%d, ratio=%f' % (
-                          i, buckets[i],
-                          10 * total_runs / i,
-                          buckets[i] / (10.0 * total_runs / i)))
-      self.assertTrue(buckets[i] < 14 * total_runs / i,
-                      'i=%d, buckets[i]=%d, expected=%d, ratio=%f' % (
-                          i, buckets[i],
-                          10 * total_runs / i,
-                          buckets[i] / (10.0 * total_runs / i)))
 
 if __name__ == '__main__':
   logging.getLogger().setLevel(logging.INFO)
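
The deleted test is a statistical check rather than an exact one: after seeding the generator for reproducibility, it asserts that the first ten positions are sampled on every run and that each later position i is hit within roughly 0.7x to 1.4x of the expected 10*total_runs/i. The target rate itself can be sanity-checked without the gap approximation by simulating the exact regime directly; this sketch is illustrative only, and sample_stream is a hypothetical helper, not SDK code:

from __future__ import division, print_function

import random


def sample_stream(n, k=10):
  """Element i (1-based) is sampled with probability min(1, k/i)."""
  return [random.randint(1, i) <= k for i in range(1, n + 1)]


runs, n = 3000, 300
buckets = [0] * n
for _ in range(runs):
  for i, hit in enumerate(sample_stream(n)):
    buckets[i] += hit

# The first ten positions are always sampled; the hit rate then decays as 10/i.
for pos in (10, 50, 300):
  print(pos, buckets[pos - 1] / runs, 'expected', min(1.0, 10 / pos))

Seeding, as the removed test does with random.seed(1717), is what keeps such a frequency-based assertion deterministic from run to run.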
