|
15 | 15 | """Counters collect the progress of the Worker for reporting to the service.""" |
16 | 16 |
|
17 | 17 | from __future__ import absolute_import |
18 | | -import math |
19 | | -import random |
20 | 18 |
|
21 | | -from google.cloud.dataflow.coders import WindowedValueCoder |
22 | | -from google.cloud.dataflow.transforms.window import WindowedValue |
23 | 19 | from google.cloud.dataflow.utils.counters import Counter |
24 | 20 |
|
25 | 21 |
|
26 | 22 | class OperationCounters(object): |
27 | 23 | """The set of basic counters to attach to an Operation.""" |
28 | 24 |
|
def __init__(self, counter_factory, step_name, coder, output_index):
  """Creates the per-output progress counters for one step.

  Args:
    counter_factory: factory object used to obtain Counter instances.
    step_name: name of the step these counters report for.
    coder: coder used to encode this output's elements.
    output_index: index of the output within the step (used in counter names).
  """
  self.coder = coder
  # Counter names follow the '<step>-out<index>-<metric>' convention expected
  # by the service-side aggregation.
  self.element_counter = counter_factory.get_counter(
      '%s-out%d-ElementCount' % (step_name, output_index), Counter.SUM)
  self.mean_byte_counter = counter_factory.get_counter(
      '%s-out%d-MeanByteCount' % (step_name, output_index), Counter.MEAN)
|
def update_from(self, windowed_value, coder=None):  # pylint: disable=unused-argument
  """Records that one more element was produced on this output.

  Only the element count is updated for now; `windowed_value` and `coder`
  are accepted but unused until size sampling is implemented.
  """
  self.element_counter.update(1)
  # TODO(silviuc): Implement estimated size sampling.
  # TODO(gildea):
  # Actually compute the encoded size of this value.
  # In spirit, something like this:
  #   if coder is None:
  #     coder = self.coder
  #   coder.store_estimated_size(windowed_value, byte_size_accumulator)
  # but will need to do sampling.
60 | 43 |
|
def update_collect(self):
  """Folds pending size estimates into the mean byte counter.

  Invoked after the current element has been fully processed. Currently a
  no-op: size sampling has not been implemented yet, so there is nothing
  to collect.
  """
  # TODO(silviuc): Implement estimated size sampling.
  pass
121 | 52 |
|
122 | 53 | def __str__(self): |
123 | 54 | return '<%s [%s]>' % (self.__class__.__name__, |
|
0 commit comments