| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 1–1.05M | stringlengths 6–83 | stringlengths 3–242 | stringclasses 222 values | stringclasses 20 values | int64 1–1.05M |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "micro_speech/simple_features/simple_model_settings.h"
const char* kCategoryLabels[kCategoryCount] = {
"silence",
"unknown",
"yes",
"no",
};
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/simple_features/simple_model_settings.cc
|
C++
|
apache-2.0
| 837
|
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_SIMPLE_MODEL_SETTINGS_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_SIMPLE_MODEL_SETTINGS_H_
// Keeping these as constant expressions allows us to allocate fixed-sized
// arrays on the stack for our working memory.
// The size of the input time series data we pass to the FFT to produce the
// frequency information. This has to be a power of two, and since we're dealing
// with 30ms of 16kHz input, which means 480 samples, 512 is the next power of
// two above that.
constexpr int kMaxAudioSampleSize = 512;
constexpr int kAudioSampleFrequency = 16000;
// All of these values are derived from the values used during model training,
// if you change your model you'll need to update these constants.
constexpr int kAverageWindowSize = 6;
constexpr int kFeatureSliceSize =
((kMaxAudioSampleSize / 2) + (kAverageWindowSize - 1)) / kAverageWindowSize;
constexpr int kFeatureSliceCount = 49;
constexpr int kFeatureElementCount = (kFeatureSliceSize * kFeatureSliceCount);
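// With the values above, this works out to kFeatureSliceSize =
// ((512 / 2) + 5) / 6 = 43 values per slice, and kFeatureElementCount =
// 43 * 49 = 2107 elements in total.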
constexpr int kFeatureSliceStrideMs = 20;
constexpr int kFeatureSliceDurationMs = 30;
constexpr int kCategoryCount = 4;
constexpr int kSilenceIndex = 0;
constexpr int kUnknownIndex = 1;
extern const char* kCategoryLabels[kCategoryCount];
#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_SIMPLE_MODEL_SETTINGS_H_
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/simple_features/simple_model_settings.h
|
C++
|
apache-2.0
| 2,055
|
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// See the header for documentation on the meaning of this data.
#include "tensorflow/lite/micro/examples/micro_speech/simple_features/yes_power_spectrum_data.h"
const uint8_t g_yes_power_spectrum_data[g_yes_power_spectrum_data_size] = {
8, 89, 8, 0, 0, 0, 0, 0, 0, 0, 0, 4, 13, 1, 6, 23, 20, 6, 4, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/simple_features/yes_power_spectrum_data.cc
|
C++
|
apache-2.0
| 1,058
|
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// This data was extracted from the larger feature data held in
// yes_simple_features_data.cc and consists of the 26th spectrogram slice of 43
// values.
// This is the expected result of running the sample data in
// yes_30ms_sample_data.cc through the preprocessing pipeline.
#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_YES_POWER_SPECTRUM_DATA_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_YES_POWER_SPECTRUM_DATA_H_
#include <cstdint>
constexpr int g_yes_power_spectrum_data_size = 43;
extern const uint8_t g_yes_power_spectrum_data[];
#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_YES_POWER_SPECTRUM_DATA_H_
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/simple_features/yes_power_spectrum_data.h
|
C++
|
apache-2.0
| 1,350
|
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/micro/examples/micro_speech/simple_features/yes_simple_features_data.h"
/* File automatically created by
* tensorflow/examples/speech_commands/wav_to_features.py \
* --sample_rate=16000 \
* --clip_duration_ms=1000 \
* --window_size_ms=30 \
* --window_stride_ms=20 \
* --feature_bin_count=40 \
* --quantize=1 \
* --preprocess="average" \
* --input_wav="speech_commands_test_set_v0.02/yes/f2e59fea_nohash_1.wav" \
* --output_c_file="yes_simple_features_data.cc" \
*/
const int g_yes_simple_f2e59fea_nohash_1_width = 43;
const int g_yes_simple_f2e59fea_nohash_1_height = 49;
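// 43 values per slice x 49 slices = 2107 bytes, matching kFeatureElementCount
// in simple_model_settings.h.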
const unsigned char g_yes_simple_f2e59fea_nohash_1_data[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 4, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 19, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 3, 3, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 8, 89, 8, 0, 0, 0, 0, 0, 0, 0, 0, 4, 13,
1, 6, 23, 20, 6, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 19, 177, 42, 1,
1, 0, 0, 0, 0, 2, 3, 119, 51, 5, 139, 92, 58, 58, 15, 2, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 13, 165, 176, 3, 1, 1, 0, 0, 1, 1, 32, 214,
26, 19, 113, 103, 28, 22, 27, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 55, 128,
27, 1, 1, 0, 1, 4, 2, 52, 93, 10, 28, 156, 10, 21, 21, 3, 3,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 14, 99, 32, 65, 7, 1, 2, 2, 6, 13, 121,
36, 15, 11, 112, 125, 14, 5, 13, 4, 4, 2, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 24, 25,
32, 5, 1, 0, 0, 0, 1, 0, 7, 5, 1, 1, 3, 3, 0, 3, 3,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 13, 13, 5, 1, 0, 0, 0, 0, 0, 3,
4, 1, 0, 1, 2, 3, 1, 1, 1, 4, 8, 1, 2, 1, 3, 1, 1,
0, 1, 1, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
8, 2, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 2, 0, 2,
1, 0, 2, 0, 2, 2, 3, 1, 1, 0, 1, 1, 4, 5, 1, 0, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 2, 1, 0, 1, 3, 1,
1, 3, 1, 1, 6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 2, 6, 2, 4, 2, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
0, 0, 1, 2, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 2, 3, 5, 2, 0,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 2, 3,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/simple_features/yes_simple_features_data.cc
|
C++
|
apache-2.0
| 11,146
|
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_YES_SIMPLE_FEATURES_DATA_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_YES_SIMPLE_FEATURES_DATA_H_
extern const int g_yes_simple_f2e59fea_nohash_1_width;
extern const int g_yes_simple_f2e59fea_nohash_1_height;
extern const unsigned char g_yes_simple_f2e59fea_nohash_1_data[];
#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_YES_SIMPLE_FEATURES_DATA_H_
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/simple_features/yes_simple_features_data.h
|
C
|
apache-2.0
| 1,140
|
import pyaudio
import wave
import random
import time
import os
from IPython import display
#from pydub import AudioSegment
#from pydub.playback import play
#from playsound import playsound
CHUNK = 2
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
SAMPLEWIDTH = 2
RECORD_SECONDS = 1
FILE_FORMAT = '.wav'
RECORDER_NAME = 'lk'
# play stream
def play_wav(name, pa):
    f = wave.open(name, "rb")
    # open stream
    play_stream = pa.open(format=pa.get_format_from_width(f.getsampwidth()),
                          channels=f.getnchannels(),
                          rate=f.getframerate(),
                          output=True)
#read data
data = f.readframes(CHUNK)
while data:
play_stream.write(data)
data = f.readframes(CHUNK)
#stop stream
play_stream.stop_stream()
play_stream.close()
#close PyAudio
# pyaudio.terminate()
f.close()
def save_wav(name, frames):
wf = wave.open(name, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
def record_wav(duration):
    time.sleep(0.2)  # brief pause before the recording starts
    print("Recording started, please speak...")
# count = 3
# for i in range(3):
# time.sleep(0.2) # 1sec, 0.1sec
# count -= 1
# print(count)
frames = []
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
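    # Each stream.read(CHUNK) returns CHUNK frames, so the loop below performs
    # RATE * duration / CHUNK reads, capturing `duration` seconds of audio.
    # The divisor is written as SAMPLEWIDTH, which only works because
    # CHUNK == SAMPLEWIDTH == 2 here.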
for i in range(0, int(RATE * duration / SAMPLEWIDTH)):
data = stream.read(CHUNK, exception_on_overflow = False)
frames.append(data)
#count = 0
#while count < int(RECORD_SECONDS * RATE):
# data = stream.read(CHUNK)
# frames.append(data)
# count += CHUNK
stream.stop_stream()
stream.close()
print("录音结束!")
return frames
# main function
if __name__ == '__main__':
p = pyaudio.PyAudio()
    input('Press Enter to start recording!\n')
# record files
count = 0
for i in range(250):
print("开始第%d录制!" % count)
        hash_name = str(hex(abs(hash(RECORDER_NAME + str(random.random()))) % 1000000000)).replace('0x','') \
+ '_nohash_' + str(count) + FILE_FORMAT
rframes = record_wav(1) # record 1 sec
save_wav(hash_name, rframes)
#time.sleep(0.5) # 1sec, 0.1sec
print("录音回放开始!\n")
play_wav(hash_name, p)
print("录音回放结束!\n")
value = input("按‘回车’保存,放弃本条请按‘其他’键并回车!\n")
if (value == ''):
count += 1
print("保存录音成功!")
else:
os.system('rm -rf %s' % hash_name)
print("已删除本条录音!")
input('请按回车键开始录制!\n')
#display.display(display.Audio(hash_name, rate=16000))
#wav = AudioSegment.from_wav(hash_name)
#play(wav)
p.terminate()
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/record.py
|
Python
|
apache-2.0
| 3,031
|
import pyaudio
import wave
import random
import time
import os
from IPython import display
#from pydub import AudioSegment
#from pydub.playback import play
#from playsound import playsound
CHUNK = 2
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
SAMPLEWIDTH = 2
RECORD_SECONDS = 1
FILE_FORMAT = '.wav'
RECORDER_NAME = 'lk'
# play stream
def play_wav(name, pa):
    f = wave.open(name, "rb")
    # instantiate PyAudio
    # pa = pyaudio.PyAudio()
    # open stream
    play_stream = pa.open(format=pa.get_format_from_width(f.getsampwidth()),
                          channels=f.getnchannels(),
                          rate=f.getframerate(),
                          output=True)
#read data
data = f.readframes(CHUNK)
while data:
play_stream.write(data)
data = f.readframes(CHUNK)
#stop stream
play_stream.stop_stream()
play_stream.close()
#close PyAudio
# pyaudio.terminate()
f.close()
def save_wav(name, frames):
wf = wave.open(name, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
def record_wav(duration):
    time.sleep(0.2)  # brief pause before the recording starts
# count = 3
# for i in range(3):
# time.sleep(0.2) # 1sec, 0.1sec
# count -= 1
# print(count)
frames = []
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
for i in range(0, int(RATE * duration / SAMPLEWIDTH)):
data = stream.read(CHUNK, exception_on_overflow = False)
frames.append(data)
#count = 0
#while count < int(RECORD_SECONDS * RATE):
# data = stream.read(CHUNK)
# frames.append(data)
# count += CHUNK
stream.stop_stream()
stream.close()
print("录音结束!")
return frames
p = pyaudio.PyAudio()
input('Press Enter to start recording!\n')
# record files
count = 0
for i in range(250):
print("开始录制噪音,请耐心等待!")
hash_name = 'noise' + FILE_FORMAT
rframes = record_wav(1) # record 1 minute
save_wav(hash_name, rframes)
#time.sleep(0.5) # 1sec, 0.1sec
print("录音回放开始!\n")
play_wav(hash_name, p)
print("录音回放结束!\n")
value = input("按‘回车’保存,重新录制请按‘其他’键并回车!\n")
if (value == ''):
count += 1
print("保存噪音成功!")
break
else:
os.system('rm -rf %s' % hash_name)
print("已删除本条录音!")
input('请按回车键开始录制!\n')
p.terminate()
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/record_noise.py
|
Python
|
apache-2.0
| 2,698
|
load("//tensorflow:tensorflow.bzl", "tf_cc_binary", "tf_cc_test", "tf_py_test")
package(
default_visibility = [
"//visibility:public",
],
licenses = ["notice"],
)
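# Example usage, assuming this package sits at the upstream
# tensorflow/examples/speech_commands path: `bazel run :train` to train a
# model, then `bazel run :freeze` to export the trained checkpoint.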
py_binary(
name = "accuracy_utils_py",
srcs = ["accuracy_utils.py"],
main = "accuracy_utils.py",
python_version = "PY3",
srcs_version = "PY3",
deps = [
"//tensorflow:tensorflow_py",
"//third_party/py/numpy",
"@six_archive//:six",
],
)
py_binary(
name = "recognize_commands_py",
srcs = ["recognize_commands.py"],
main = "recognize_commands.py",
python_version = "PY3",
srcs_version = "PY3",
deps = [
"//tensorflow:tensorflow_py",
"//third_party/py/numpy",
"@six_archive//:six",
],
)
py_binary(
name = "test_streaming_accuracy_py",
srcs = ["test_streaming_accuracy.py"],
main = "test_streaming_accuracy.py",
python_version = "PY3",
srcs_version = "PY3",
deps = [
":accuracy_utils_py",
":recognize_commands_py",
"//tensorflow:tensorflow_py",
"//third_party/py/numpy",
],
)
py_library(
name = "models",
srcs = [
"models.py",
],
srcs_version = "PY3",
deps = [
"//tensorflow:tensorflow_py",
"//third_party/py/numpy",
"@six_archive//:six",
],
)
tf_py_test(
name = "models_test",
size = "small",
srcs = ["models_test.py"],
tags = [
"no_pip", # b/131330719
],
deps = [
":models",
"//tensorflow/python:client_testlib",
],
)
py_library(
name = "input_data",
srcs = [
"input_data.py",
],
srcs_version = "PY3",
deps = [
"//tensorflow:tensorflow_py",
"//tensorflow/lite/experimental/microfrontend:audio_microfrontend_py",
"//third_party/py/numpy",
"@six_archive//:six",
],
)
tf_py_test(
name = "input_data_test",
size = "small",
srcs = ["input_data_test.py"],
tags = [
"no_pip", # b/131330719
"v1only", # uses contrib
],
deps = [
":input_data",
":models",
"//tensorflow/python:client_testlib",
],
)
py_binary(
name = "train",
srcs = ["train.py"],
python_version = "PY3",
srcs_version = "PY3",
deps = [":train_main_lib"],
)
py_library(
name = "train_main_lib",
srcs = [
"train.py",
],
srcs_version = "PY3",
deps = [
":input_data",
":models",
"//tensorflow:tensorflow_py",
"//third_party/py/numpy",
"@six_archive//:six",
],
)
tf_py_test(
name = "train_test",
size = "small",
srcs = ["train_test.py"],
tags = [
"no_pip", # b/131330719
"v1only", # uses contrib
],
deps = [
":train_main_lib",
"//tensorflow/python:client_testlib",
],
)
py_binary(
name = "freeze",
srcs = ["freeze.py"],
python_version = "PY3",
srcs_version = "PY3",
deps = [":freeze_main_lib"],
)
py_library(
name = "freeze_main_lib",
srcs = ["freeze.py"],
srcs_version = "PY3",
deps = [":freeze_lib"],
)
py_library(
name = "freeze_lib",
srcs = [
"freeze.py",
],
srcs_version = "PY3",
tags = [
"no_pip", # b/131330719
],
deps = [
":input_data",
":models",
"//tensorflow:tensorflow_py",
],
)
tf_py_test(
name = "freeze_test",
size = "small",
srcs = ["freeze_test.py"],
tags = [
"no_pip", # b/131330719
"v1only", # uses contrib
],
deps = [
":freeze_main_lib",
"//tensorflow/python:client_testlib",
],
)
py_binary(
name = "wav_to_features",
srcs = ["wav_to_features.py"],
python_version = "PY3",
srcs_version = "PY3",
deps = [":wav_to_features_main_lib"],
)
py_library(
name = "wav_to_features_main_lib",
srcs = ["wav_to_features.py"],
srcs_version = "PY3",
deps = [":wav_to_features_lib"],
)
py_library(
name = "wav_to_features_lib",
srcs = [
"wav_to_features.py",
],
srcs_version = "PY3",
deps = [
":input_data",
":models",
"//tensorflow:tensorflow_py",
],
)
tf_py_test(
name = "wav_to_features_test",
size = "small",
srcs = ["wav_to_features_test.py"],
tags = [
"no_pip", # b/131330719
"v1only", # uses contrib
],
deps = [
":wav_to_features_main_lib",
"//tensorflow/python:client_testlib",
],
)
py_binary(
name = "generate_streaming_test_wav",
srcs = ["generate_streaming_test_wav.py"],
python_version = "PY3",
srcs_version = "PY3",
deps = [":generate_streaming_test_wav_main_lib"],
)
py_library(
name = "generate_streaming_test_wav_main_lib",
srcs = ["generate_streaming_test_wav.py"],
srcs_version = "PY3",
deps = [":generate_streaming_test_wav_lib"],
)
py_library(
name = "generate_streaming_test_wav_lib",
srcs = [
"generate_streaming_test_wav.py",
],
srcs_version = "PY3",
deps = [
":input_data",
":models",
"//tensorflow:tensorflow_py",
"//third_party/py/numpy",
],
)
tf_py_test(
name = "generate_streaming_test_wav_test",
size = "small",
srcs = ["generate_streaming_test_wav_test.py"],
tags = [
"no_pip", # b/131330719
"v1only", # uses contrib
],
deps = [
":generate_streaming_test_wav_main_lib",
"//tensorflow/python:client_testlib",
],
)
tf_cc_binary(
name = "label_wav_cc",
srcs = [
"label_wav.cc",
],
deps = [
"//tensorflow/core:core_cpu",
"//tensorflow/core:framework",
"//tensorflow/core:framework_internal",
"//tensorflow/core:lib",
"//tensorflow/core:protos_all_cc",
"//tensorflow/core:tensorflow",
],
)
py_binary(
name = "label_wav",
srcs = ["label_wav.py"],
python_version = "PY3",
srcs_version = "PY3",
deps = [":label_wav_main_lib"],
)
py_library(
name = "label_wav_main_lib",
srcs = ["label_wav.py"],
srcs_version = "PY3",
deps = [":label_wav_lib"],
)
py_library(
name = "label_wav_lib",
srcs = [
"label_wav.py",
],
srcs_version = "PY3",
deps = [
"//tensorflow:tensorflow_py",
],
)
tf_py_test(
name = "label_wav_test",
size = "medium",
srcs = ["label_wav_test.py"],
tags = [
"no_pip", # b/131330719
"v1only", # uses contrib
],
deps = [
":label_wav_main_lib",
"//tensorflow/python:client_testlib",
],
)
cc_library(
name = "recognize_commands",
srcs = [
"recognize_commands.cc",
],
hdrs = [
"recognize_commands.h",
],
deps = [
"//tensorflow/core:core_cpu",
"//tensorflow/core:framework",
"//tensorflow/core:framework_internal",
"//tensorflow/core:lib",
"//tensorflow/core:protos_all_cc",
"//tensorflow/core:tensorflow",
],
)
tf_cc_test(
name = "recognize_commands_test",
size = "medium",
srcs = [
"recognize_commands_test.cc",
],
deps = [
":recognize_commands",
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
"//tensorflow/core:test",
"//tensorflow/core:test_main",
"//tensorflow/core:testlib",
],
)
cc_library(
name = "accuracy_utils",
srcs = [
"accuracy_utils.cc",
],
hdrs = [
"accuracy_utils.h",
],
deps = [
"//tensorflow/core:core_cpu",
"//tensorflow/core:framework",
"//tensorflow/core:framework_internal",
"//tensorflow/core:lib",
"//tensorflow/core:protos_all_cc",
"//tensorflow/core:tensorflow",
],
)
tf_cc_test(
name = "accuracy_utils_test",
size = "medium",
srcs = [
"accuracy_utils_test.cc",
],
deps = [
":accuracy_utils",
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
"//tensorflow/core:test",
"//tensorflow/core:test_main",
"//tensorflow/core:testlib",
],
)
tf_cc_binary(
name = "test_streaming_accuracy",
srcs = [
"test_streaming_accuracy.cc",
],
deps = [
":accuracy_utils",
":recognize_commands",
"//tensorflow/core:core_cpu",
"//tensorflow/core:framework",
"//tensorflow/core:framework_internal",
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
"//tensorflow/core:protos_all_cc",
],
)
py_library(
name = "test_lib",
srcs_version = "PY3",
deps = [
":freeze",
":generate_streaming_test_wav",
":input_data",
":label_wav",
":models",
":train",
":wav_to_features",
],
)
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/speech_commands/BUILD
|
Starlark
|
apache-2.0
| 8,869
|
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/examples/speech_commands/accuracy_utils.h"
#include <fstream>
#include <iomanip>
#include <unordered_set>
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
Status ReadGroundTruthFile(const string& file_name,
std::vector<std::pair<string, int64>>* result) {
std::ifstream file(file_name);
if (!file) {
return tensorflow::errors::NotFound("Ground truth file '", file_name,
"' not found.");
}
result->clear();
string line;
while (std::getline(file, line)) {
std::vector<string> pieces = tensorflow::str_util::Split(line, ',');
if (pieces.size() != 2) {
continue;
}
float timestamp;
    if (!tensorflow::strings::safe_strtof(pieces[1].c_str(), &timestamp)) {
return tensorflow::errors::InvalidArgument(
"Wrong number format at line: ", line);
}
string label = pieces[0];
auto timestamp_int64 = static_cast<int64>(timestamp);
result->push_back({label, timestamp_int64});
}
std::sort(result->begin(), result->end(),
[](const std::pair<string, int64>& left,
const std::pair<string, int64>& right) {
return left.second < right.second;
});
return Status::OK();
}
void CalculateAccuracyStats(
const std::vector<std::pair<string, int64>>& ground_truth_list,
const std::vector<std::pair<string, int64>>& found_words,
int64 up_to_time_ms, int64 time_tolerance_ms,
StreamingAccuracyStats* stats) {
int64 latest_possible_time;
if (up_to_time_ms == -1) {
latest_possible_time = std::numeric_limits<int64>::max();
} else {
latest_possible_time = up_to_time_ms + time_tolerance_ms;
}
stats->how_many_ground_truth_words = 0;
for (const std::pair<string, int64>& ground_truth : ground_truth_list) {
const int64 ground_truth_time = ground_truth.second;
if (ground_truth_time > latest_possible_time) {
break;
}
++stats->how_many_ground_truth_words;
}
stats->how_many_false_positives = 0;
stats->how_many_correct_words = 0;
stats->how_many_wrong_words = 0;
std::unordered_set<int64> has_ground_truth_been_matched;
for (const std::pair<string, int64>& found_word : found_words) {
const string& found_label = found_word.first;
const int64 found_time = found_word.second;
const int64 earliest_time = found_time - time_tolerance_ms;
const int64 latest_time = found_time + time_tolerance_ms;
bool has_match_been_found = false;
for (const std::pair<string, int64>& ground_truth : ground_truth_list) {
const int64 ground_truth_time = ground_truth.second;
if ((ground_truth_time > latest_time) ||
(ground_truth_time > latest_possible_time)) {
break;
}
if (ground_truth_time < earliest_time) {
continue;
}
const string& ground_truth_label = ground_truth.first;
if ((ground_truth_label == found_label) &&
(has_ground_truth_been_matched.count(ground_truth_time) == 0)) {
++stats->how_many_correct_words;
} else {
++stats->how_many_wrong_words;
}
has_ground_truth_been_matched.insert(ground_truth_time);
has_match_been_found = true;
break;
}
if (!has_match_been_found) {
++stats->how_many_false_positives;
}
}
stats->how_many_ground_truth_matched = has_ground_truth_been_matched.size();
}
void PrintAccuracyStats(const StreamingAccuracyStats& stats) {
if (stats.how_many_ground_truth_words == 0) {
LOG(INFO) << "No ground truth yet, " << stats.how_many_false_positives
<< " false positives";
} else {
float any_match_percentage =
(stats.how_many_ground_truth_matched * 100.0f) /
stats.how_many_ground_truth_words;
float correct_match_percentage = (stats.how_many_correct_words * 100.0f) /
stats.how_many_ground_truth_words;
float wrong_match_percentage = (stats.how_many_wrong_words * 100.0f) /
stats.how_many_ground_truth_words;
float false_positive_percentage =
(stats.how_many_false_positives * 100.0f) /
stats.how_many_ground_truth_words;
LOG(INFO) << std::setprecision(1) << std::fixed << any_match_percentage
<< "% matched, " << correct_match_percentage << "% correctly, "
<< wrong_match_percentage << "% wrongly, "
<< false_positive_percentage << "% false positives ";
}
}
} // namespace tensorflow
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/speech_commands/accuracy_utils.cc
|
C++
|
apache-2.0
| 5,262
|
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_EXAMPLES_SPEECH_COMMANDS_ACCURACY_UTILS_H_
#define TENSORFLOW_EXAMPLES_SPEECH_COMMANDS_ACCURACY_UTILS_H_
#include <vector>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
struct StreamingAccuracyStats {
StreamingAccuracyStats()
: how_many_ground_truth_words(0),
how_many_ground_truth_matched(0),
how_many_false_positives(0),
how_many_correct_words(0),
how_many_wrong_words(0) {}
int32 how_many_ground_truth_words;
int32 how_many_ground_truth_matched;
int32 how_many_false_positives;
int32 how_many_correct_words;
int32 how_many_wrong_words;
};
// Takes a file name, and loads a list of expected word labels and times from
// it, as comma-separated values.
Status ReadGroundTruthFile(const string& file_name,
std::vector<std::pair<string, int64>>* result);
// Given ground truth labels and corresponding predictions found by a model,
// figure out how many were correct. Takes a time limit, so that only
// predictions up to a point in time are considered, in case we're evaluating
// accuracy when the model has only been run on part of the stream.
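// A found word is counted as correct when an unmatched ground truth entry with
// the same label lies within time_tolerance_ms of the detection time, as wrong
// when the nearby entry's label differs, and as a false positive when no
// ground truth entry falls inside the tolerance window at all.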
void CalculateAccuracyStats(
const std::vector<std::pair<string, int64>>& ground_truth_list,
const std::vector<std::pair<string, int64>>& found_words,
int64 up_to_time_ms, int64 time_tolerance_ms,
StreamingAccuracyStats* stats);
// Writes a human-readable description of the statistics to stdout.
void PrintAccuracyStats(const StreamingAccuracyStats& stats);
} // namespace tensorflow
#endif // TENSORFLOW_EXAMPLES_SPEECH_COMMANDS_ACCURACY_UTILS_H_
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/speech_commands/accuracy_utils.h
|
C++
|
apache-2.0
| 2,359
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for getting accuracy statistics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class StreamingAccuracyStats(object):
"""Get streaming accuracy statistics every time a new command is founded.
Attributes:
_how_many_gt: How many ground truths.
_how_many_gt_matched: How many ground truths have been matched.
_how_many_fp: How many commands have been fired as false positive.
_how_many_c: How many commands have been fired correctly.
_how_many_w: How many commands have been fired wrongly.
    _gt_occurrence: A list recording which commands occur in the input audio
      stream, and when.
_previous_c: A variable to record the last status of _how_many_c.
_previous_w: A variable to record the last status of _how_many_w.
_previous_fp: A variable to record the last status of _how_many_fp.
"""
def __init__(self):
"""Init StreamingAccuracyStats with void or zero values."""
self._how_many_gt = 0
self._how_many_gt_matched = 0
self._how_many_fp = 0
self._how_many_c = 0
self._how_many_w = 0
self._gt_occurrence = []
self._previous_c = 0
self._previous_w = 0
self._previous_fp = 0
def read_ground_truth_file(self, file_name):
"""Load ground truth and timestamp pairs and store it in time order."""
with open(file_name, 'r') as f:
for line in f:
line_split = line.strip().split(',')
if len(line_split) != 2:
continue
timestamp = round(float(line_split[1]))
label = line_split[0]
self._gt_occurrence.append([label, timestamp])
self._gt_occurrence = sorted(self._gt_occurrence, key=lambda item: item[1])
def delta(self):
"""Compute delta of StreamingAccuracyStats against last status."""
fp_delta = self._how_many_fp - self._previous_fp
w_delta = self._how_many_w - self._previous_w
c_delta = self._how_many_c - self._previous_c
if fp_delta == 1:
recognition_state = '(False Positive)'
elif c_delta == 1:
recognition_state = '(Correct)'
elif w_delta == 1:
recognition_state = '(Wrong)'
else:
raise ValueError('Unexpected state in statistics')
# Update the previous status
self._previous_c = self._how_many_c
self._previous_w = self._how_many_w
self._previous_fp = self._how_many_fp
return recognition_state
def calculate_accuracy_stats(self, found_words, up_to_time_ms,
time_tolerance_ms):
"""Calculate accuracy statistics when a new commands is founded.
Given ground truth and corresponding predictions founded by
model, figure out how many were correct. Take a tolerance time, so that only
predictions up to a point in time are considered.
Args:
      found_words: A list of all commands found so far.
up_to_time_ms: End timestamp of this audio piece.
time_tolerance_ms: The tolerance milliseconds before and after
up_to_time_ms to match a ground truth.
"""
if up_to_time_ms == -1:
latest_possible_time = np.inf
else:
latest_possible_time = up_to_time_ms + time_tolerance_ms
self._how_many_gt = 0
for ground_truth in self._gt_occurrence:
ground_truth_time = ground_truth[1]
if ground_truth_time > latest_possible_time:
break
self._how_many_gt += 1
self._how_many_fp = 0
self._how_many_c = 0
self._how_many_w = 0
has_gt_matched = []
for found_word in found_words:
found_label = found_word[0]
found_time = found_word[1]
earliest_time = found_time - time_tolerance_ms
latest_time = found_time + time_tolerance_ms
has_matched_been_found = False
for ground_truth in self._gt_occurrence:
ground_truth_time = ground_truth[1]
if (ground_truth_time > latest_time or
ground_truth_time > latest_possible_time):
break
if ground_truth_time < earliest_time:
continue
ground_truth_label = ground_truth[0]
if (ground_truth_label == found_label and
has_gt_matched.count(ground_truth_time) == 0):
self._how_many_c += 1
else:
self._how_many_w += 1
has_gt_matched.append(ground_truth_time)
has_matched_been_found = True
break
if not has_matched_been_found:
self._how_many_fp += 1
self._how_many_gt_matched = len(has_gt_matched)
def print_accuracy_stats(self):
"""Write a human-readable description of the statistics to stdout."""
if self._how_many_gt == 0:
      tf.compat.v1.logging.info('No ground truth yet, {} false positives'.format(
          self._how_many_fp))
else:
any_match_percentage = self._how_many_gt_matched / self._how_many_gt * 100
correct_match_percentage = self._how_many_c / self._how_many_gt * 100
wrong_match_percentage = self._how_many_w / self._how_many_gt * 100
false_positive_percentage = self._how_many_fp / self._how_many_gt * 100
tf.compat.v1.logging.info(
'{:.1f}% matched, {:.1f}% correct, {:.1f}% wrong, '
'{:.1f}% false positive'.format(any_match_percentage,
correct_match_percentage,
wrong_match_percentage,
false_positive_percentage))
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/speech_commands/accuracy_utils.py
|
Python
|
apache-2.0
| 6,159
|
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/examples/speech_commands/accuracy_utils.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(AccuracyUtilsTest, ReadGroundTruthFile) {
string file_name = tensorflow::io::JoinPath(tensorflow::testing::TmpDir(),
"ground_truth.txt");
string file_data = "a,10\nb,12\n";
TF_ASSERT_OK(WriteStringToFile(Env::Default(), file_name, file_data));
std::vector<std::pair<string, int64>> ground_truth;
TF_ASSERT_OK(ReadGroundTruthFile(file_name, &ground_truth));
ASSERT_EQ(2, ground_truth.size());
EXPECT_EQ("a", ground_truth[0].first);
EXPECT_EQ(10, ground_truth[0].second);
EXPECT_EQ("b", ground_truth[1].first);
EXPECT_EQ(12, ground_truth[1].second);
}
TEST(AccuracyUtilsTest, CalculateAccuracyStats) {
StreamingAccuracyStats stats;
CalculateAccuracyStats({{"a", 1000}, {"b", 9000}},
{{"a", 1200}, {"b", 5000}, {"a", 8700}}, 10000, 500,
&stats);
EXPECT_EQ(2, stats.how_many_ground_truth_words);
EXPECT_EQ(2, stats.how_many_ground_truth_matched);
EXPECT_EQ(1, stats.how_many_false_positives);
EXPECT_EQ(1, stats.how_many_correct_words);
EXPECT_EQ(1, stats.how_many_wrong_words);
}
TEST(AccuracyUtilsTest, PrintAccuracyStats) {
StreamingAccuracyStats stats;
PrintAccuracyStats(stats);
}
} // namespace tensorflow
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/speech_commands/accuracy_utils_test.cc
|
C++
|
apache-2.0
| 2,298
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Converts a trained checkpoint into a frozen model for mobile inference.
Once you've trained a model using the `train.py` script, you can use this tool
to convert it into a binary GraphDef file that can be loaded into the Android,
iOS, or Raspberry Pi example code. Here's an example of how to run it:
bazel run tensorflow/examples/speech_commands/freeze -- \
--sample_rate=16000 --dct_coefficient_count=40 --window_size_ms=20 \
--window_stride_ms=10 --clip_duration_ms=1000 \
--model_architecture=conv \
--start_checkpoint=/tmp/speech_commands_train/conv.ckpt-1300 \
--output_file=/tmp/my_frozen_graph.pb
One thing to watch out for is that you need to pass in the same arguments for
`sample_rate` and other command line variables here as you did for the training
script.
The resulting graph has an input for WAV-encoded data named 'wav_data', one for
raw PCM data (as floats in the range -1.0 to 1.0) called 'decoded_sample_data',
and the output is called 'labels_softmax'.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import sys
import tensorflow as tf
import input_data
import models
from tensorflow.python.framework import graph_util
from tensorflow.python.ops import gen_audio_ops as audio_ops
# If it's available, load the specialized feature generator. If this doesn't
# work, try building with bazel instead of running the Python script directly.
# bazel run tensorflow/examples/speech_commands:freeze_graph
try:
from tensorflow.lite.experimental.microfrontend.python.ops import audio_microfrontend_op as frontend_op # pylint:disable=g-import-not-at-top
except ImportError:
frontend_op = None
FLAGS = None
def create_inference_graph(wanted_words, sample_rate, clip_duration_ms,
clip_stride_ms, window_size_ms, window_stride_ms,
feature_bin_count, model_architecture, preprocess):
"""Creates an audio model with the nodes needed for inference.
Uses the supplied arguments to create a model, and inserts the input and
output nodes that are needed to use the graph for inference.
Args:
wanted_words: Comma-separated list of the words we're trying to recognize.
sample_rate: How many samples per second are in the input audio files.
    clip_duration_ms: Length of each audio clip to analyze, in milliseconds.
clip_stride_ms: How often to run recognition. Useful for models with cache.
window_size_ms: Time slice duration to estimate frequencies from.
window_stride_ms: How far apart time slices should be.
feature_bin_count: Number of frequency bands to analyze.
model_architecture: Name of the kind of model to generate.
preprocess: How the spectrogram is processed to produce features, for
example 'mfcc', 'average', or 'micro'.
Returns:
Input and output tensor objects.
Raises:
Exception: If the preprocessing mode isn't recognized.
"""
words_list = input_data.prepare_words_list(wanted_words.split(','))
model_settings = models.prepare_model_settings(
len(words_list), sample_rate, clip_duration_ms, window_size_ms,
window_stride_ms, feature_bin_count, preprocess)
runtime_settings = {'clip_stride_ms': clip_stride_ms}
wav_data_placeholder = tf.compat.v1.placeholder(tf.string, [],
name='wav_data')
decoded_sample_data = tf.audio.decode_wav(
wav_data_placeholder,
desired_channels=1,
desired_samples=model_settings['desired_samples'],
name='decoded_sample_data')
spectrogram = audio_ops.audio_spectrogram(
decoded_sample_data.audio,
window_size=model_settings['window_size_samples'],
stride=model_settings['window_stride_samples'],
magnitude_squared=True)
if preprocess == 'average':
fingerprint_input = tf.nn.pool(
input=tf.expand_dims(spectrogram, -1),
window_shape=[1, model_settings['average_window_width']],
strides=[1, model_settings['average_window_width']],
pooling_type='AVG',
padding='SAME')
elif preprocess == 'mfcc':
fingerprint_input = audio_ops.mfcc(
spectrogram,
sample_rate,
dct_coefficient_count=model_settings['fingerprint_width'])
elif preprocess == 'micro':
if not frontend_op:
raise Exception(
'Micro frontend op is currently not available when running TensorFlow'
' directly from Python, you need to build and run through Bazel, for'
' example'
' `bazel run tensorflow/examples/speech_commands:freeze_graph`')
sample_rate = model_settings['sample_rate']
window_size_ms = (model_settings['window_size_samples'] *
1000) / sample_rate
window_step_ms = (model_settings['window_stride_samples'] *
1000) / sample_rate
int16_input = tf.cast(
tf.multiply(decoded_sample_data.audio, 32767), tf.int16)
micro_frontend = frontend_op.audio_microfrontend(
int16_input,
sample_rate=sample_rate,
window_size=window_size_ms,
window_step=window_step_ms,
num_channels=model_settings['fingerprint_width'],
out_scale=1,
out_type=tf.float32)
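    # Scale the frontend's roughly 0-255 output by 10/256 so the feature
    # values land in the range the rest of the training pipeline expects.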
fingerprint_input = tf.multiply(micro_frontend, (10.0 / 256.0))
else:
raise Exception('Unknown preprocess mode "%s" (should be "mfcc",'
' "average", or "micro")' % (preprocess))
fingerprint_size = model_settings['fingerprint_size']
reshaped_input = tf.reshape(fingerprint_input, [-1, fingerprint_size])
logits = models.create_model(
reshaped_input, model_settings, model_architecture, is_training=False,
runtime_settings=runtime_settings)
# Create an output to use for inference.
softmax = tf.nn.softmax(logits, name='labels_softmax')
return reshaped_input, softmax
def save_graph_def(file_name, frozen_graph_def):
"""Writes a graph def file out to disk.
Args:
file_name: Where to save the file.
frozen_graph_def: GraphDef proto object to save.
"""
tf.io.write_graph(
frozen_graph_def,
os.path.dirname(file_name),
os.path.basename(file_name),
as_text=False)
tf.compat.v1.logging.info('Saved frozen graph to %s', file_name)
def save_saved_model(file_name, sess, input_tensor, output_tensor):
"""Writes a SavedModel out to disk.
Args:
file_name: Where to save the file.
sess: TensorFlow session containing the graph.
input_tensor: Tensor object defining the input's properties.
output_tensor: Tensor object defining the output's properties.
"""
# Store the frozen graph as a SavedModel for v2 compatibility.
builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(file_name)
tensor_info_inputs = {
'input': tf.compat.v1.saved_model.utils.build_tensor_info(input_tensor)
}
tensor_info_outputs = {
'output': tf.compat.v1.saved_model.utils.build_tensor_info(output_tensor)
}
signature = (
tf.compat.v1.saved_model.signature_def_utils.build_signature_def(
inputs=tensor_info_inputs,
outputs=tensor_info_outputs,
method_name=tf.compat.v1.saved_model.signature_constants
.PREDICT_METHOD_NAME))
builder.add_meta_graph_and_variables(
sess,
[tf.compat.v1.saved_model.tag_constants.SERVING],
signature_def_map={
tf.compat.v1.saved_model.signature_constants
.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
signature,
},
)
builder.save()
def main(_):
if FLAGS.quantize:
try:
_ = tf.contrib
except AttributeError as e:
msg = e.args[0]
msg += ('\n\n The --quantize option still requires contrib, which is not '
'part of TensorFlow 2.0. Please install a previous version:'
'\n `pip install tensorflow<=1.15`')
e.args = (msg,)
raise e
# Create the model and load its weights.
sess = tf.compat.v1.InteractiveSession()
input_tensor, output_tensor = create_inference_graph(
FLAGS.wanted_words, FLAGS.sample_rate, FLAGS.clip_duration_ms,
FLAGS.clip_stride_ms, FLAGS.window_size_ms, FLAGS.window_stride_ms,
FLAGS.feature_bin_count, FLAGS.model_architecture, FLAGS.preprocess)
if FLAGS.quantize:
tf.contrib.quantize.create_eval_graph()
models.load_variables_from_checkpoint(sess, FLAGS.start_checkpoint)
# Turn all the variables into inline constants inside the graph and save it.
frozen_graph_def = graph_util.convert_variables_to_constants(
sess, sess.graph_def, ['labels_softmax'])
if FLAGS.save_format == 'graph_def':
save_graph_def(FLAGS.output_file, frozen_graph_def)
elif FLAGS.save_format == 'saved_model':
save_saved_model(FLAGS.output_file, sess, input_tensor, output_tensor)
else:
raise Exception('Unknown save format "%s" (should be "graph_def" or'
' "saved_model")' % (FLAGS.save_format))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--sample_rate',
type=int,
default=16000,
help='Expected sample rate of the wavs',)
parser.add_argument(
'--clip_duration_ms',
type=int,
default=1000,
help='Expected duration in milliseconds of the wavs',)
parser.add_argument(
'--clip_stride_ms',
type=int,
default=30,
help='How often to run recognition. Useful for models with cache.',)
parser.add_argument(
'--window_size_ms',
type=float,
default=30.0,
help='How long each spectrogram timeslice is',)
parser.add_argument(
'--window_stride_ms',
type=float,
default=10.0,
help='How long the stride is between spectrogram timeslices',)
parser.add_argument(
'--feature_bin_count',
type=int,
default=40,
help='How many bins to use for the MFCC fingerprint',
)
parser.add_argument(
'--start_checkpoint',
type=str,
default='',
help='If specified, restore this pretrained model before any training.')
parser.add_argument(
'--model_architecture',
type=str,
default='conv',
help='What model architecture to use')
parser.add_argument(
'--wanted_words',
type=str,
default='yes,no,up,down,left,right,on,off,stop,go',
help='Words to use (others will be added to an unknown label)',)
parser.add_argument(
'--output_file', type=str, help='Where to save the frozen graph.')
parser.add_argument(
'--quantize',
type=bool,
default=False,
help='Whether to train the model for eight-bit deployment')
parser.add_argument(
'--preprocess',
type=str,
default='mfcc',
help='Spectrogram processing mode. Can be "mfcc" or "average"')
parser.add_argument(
'--save_format',
type=str,
default='graph_def',
help='How to save the result. Can be "graph_def" or "saved_model"')
FLAGS, unparsed = parser.parse_known_args()
tf.compat.v1.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/speech_commands/freeze.py
|
Python
|
apache-2.0
| 11,759
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for data input for speech commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
from tensorflow.examples.speech_commands import freeze
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops.variables import global_variables_initializer
from tensorflow.python.platform import test
class FreezeTest(test.TestCase):
@test_util.run_deprecated_v1
def testCreateInferenceGraphWithMfcc(self):
with self.cached_session() as sess:
freeze.create_inference_graph(
wanted_words='a,b,c,d',
sample_rate=16000,
clip_duration_ms=1000.0,
clip_stride_ms=30.0,
window_size_ms=30.0,
window_stride_ms=10.0,
feature_bin_count=40,
model_architecture='conv',
preprocess='mfcc')
self.assertIsNotNone(sess.graph.get_tensor_by_name('wav_data:0'))
self.assertIsNotNone(
sess.graph.get_tensor_by_name('decoded_sample_data:0'))
self.assertIsNotNone(sess.graph.get_tensor_by_name('labels_softmax:0'))
ops = [node.op for node in sess.graph_def.node]
self.assertEqual(1, ops.count('Mfcc'))
@test_util.run_deprecated_v1
def testCreateInferenceGraphWithoutMfcc(self):
with self.cached_session() as sess:
freeze.create_inference_graph(
wanted_words='a,b,c,d',
sample_rate=16000,
clip_duration_ms=1000.0,
clip_stride_ms=30.0,
window_size_ms=30.0,
window_stride_ms=10.0,
feature_bin_count=40,
model_architecture='conv',
preprocess='average')
self.assertIsNotNone(sess.graph.get_tensor_by_name('wav_data:0'))
self.assertIsNotNone(
sess.graph.get_tensor_by_name('decoded_sample_data:0'))
self.assertIsNotNone(sess.graph.get_tensor_by_name('labels_softmax:0'))
ops = [node.op for node in sess.graph_def.node]
self.assertEqual(0, ops.count('Mfcc'))
@test_util.run_deprecated_v1
def testCreateInferenceGraphWithMicro(self):
with self.cached_session() as sess:
freeze.create_inference_graph(
wanted_words='a,b,c,d',
sample_rate=16000,
clip_duration_ms=1000.0,
clip_stride_ms=30.0,
window_size_ms=30.0,
window_stride_ms=10.0,
feature_bin_count=40,
model_architecture='conv',
preprocess='micro')
self.assertIsNotNone(sess.graph.get_tensor_by_name('wav_data:0'))
self.assertIsNotNone(
sess.graph.get_tensor_by_name('decoded_sample_data:0'))
self.assertIsNotNone(sess.graph.get_tensor_by_name('labels_softmax:0'))
@test_util.run_deprecated_v1
def testFeatureBinCount(self):
with self.cached_session() as sess:
freeze.create_inference_graph(
wanted_words='a,b,c,d',
sample_rate=16000,
clip_duration_ms=1000.0,
clip_stride_ms=30.0,
window_size_ms=30.0,
window_stride_ms=10.0,
feature_bin_count=80,
model_architecture='conv',
preprocess='average')
self.assertIsNotNone(sess.graph.get_tensor_by_name('wav_data:0'))
self.assertIsNotNone(
sess.graph.get_tensor_by_name('decoded_sample_data:0'))
self.assertIsNotNone(sess.graph.get_tensor_by_name('labels_softmax:0'))
ops = [node.op for node in sess.graph_def.node]
self.assertEqual(0, ops.count('Mfcc'))
@test_util.run_deprecated_v1
def testCreateSavedModel(self):
tmp_dir = self.get_temp_dir()
saved_model_path = os.path.join(tmp_dir, 'saved_model')
with self.cached_session() as sess:
input_tensor, output_tensor = freeze.create_inference_graph(
wanted_words='a,b,c,d',
sample_rate=16000,
clip_duration_ms=1000.0,
clip_stride_ms=30.0,
window_size_ms=30.0,
window_stride_ms=10.0,
feature_bin_count=40,
model_architecture='conv',
preprocess='micro')
global_variables_initializer().run()
graph_util.convert_variables_to_constants(
sess, sess.graph_def, ['labels_softmax'])
freeze.save_saved_model(saved_model_path, sess, input_tensor,
output_tensor)
if __name__ == '__main__':
test.main()
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/speech_commands/freeze_test.py
|
Python
|
apache-2.0
| 5,086
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Saves out a .wav file with synthesized conversational data and labels.
The best way to estimate the real-world performance of an audio recognition
model is by running it against a continuous stream of data, the way that it
would be used in an application. Training evaluations are only run against
discrete individual samples, so the results aren't as realistic.
To make it easy to run evaluations against audio streams, this script uses
samples from the testing partition of the data set, mixes them in at random
positions together with background noise, and saves out the result as one long
audio file.
Here's an example of generating a test file:
bazel run tensorflow/examples/speech_commands:generate_streaming_test_wav -- \
--data_dir=/tmp/my_wavs --background_dir=/tmp/my_backgrounds \
--background_volume=0.1 --test_duration_seconds=600 \
--output_audio_file=/tmp/streaming_test.wav \
--output_labels_file=/tmp/streaming_test_labels.txt
Once you've created a streaming audio file, you can then use the
test_streaming_accuracy tool to calculate accuracy metrics for a model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import math
import sys
import numpy as np
import tensorflow as tf
import input_data
import models
FLAGS = None
def mix_in_audio_sample(track_data, track_offset, sample_data, sample_offset,
clip_duration, sample_volume, ramp_in, ramp_out):
"""Mixes the sample data into the main track at the specified offset.
Args:
track_data: Numpy array holding main audio data. Modified in-place.
track_offset: Where to mix the sample into the main track.
sample_data: Numpy array of audio data to mix into the main track.
sample_offset: Where to start in the audio sample.
clip_duration: How long the sample segment is.
sample_volume: Loudness to mix the sample in at.
ramp_in: Length in samples of volume increase stage.
ramp_out: Length in samples of volume decrease stage.
"""
ramp_out_index = clip_duration - ramp_out
track_end = min(track_offset + clip_duration, track_data.shape[0])
track_end = min(track_end,
track_offset + (sample_data.shape[0] - sample_offset))
sample_range = track_end - track_offset
for i in range(sample_range):
if i < ramp_in:
envelope_scale = i / ramp_in
elif i > ramp_out_index:
envelope_scale = (clip_duration - i) / ramp_out
else:
envelope_scale = 1
sample_input = sample_data[sample_offset + i]
track_data[track_offset
+ i] += sample_input * envelope_scale * sample_volume
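# Illustrative sketch of mix_in_audio_sample (mirrors the unit test for this
# function; the values are arbitrary): mix a constant one-second burst into a
# silent track at offset 2000 with 100-sample ramps at each end.
#
#   track_data = np.zeros([10000])
#   sample_data = np.ones([1000])
#   mix_in_audio_sample(track_data, 2000, sample_data, 0, 1000, 1.0, 100, 100)
#   # track_data[2500] is now ~1.0 (fully ramped in); track_data[3500] stays 0.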
def main(_):
words_list = input_data.prepare_words_list(FLAGS.wanted_words.split(','))
model_settings = models.prepare_model_settings(
len(words_list), FLAGS.sample_rate, FLAGS.clip_duration_ms,
FLAGS.window_size_ms, FLAGS.window_stride_ms, FLAGS.feature_bin_count,
'mfcc')
audio_processor = input_data.AudioProcessor(
'', FLAGS.data_dir, FLAGS.silence_percentage, 10,
FLAGS.wanted_words.split(','), FLAGS.validation_percentage,
FLAGS.testing_percentage, model_settings, FLAGS.data_dir)
output_audio_sample_count = FLAGS.sample_rate * FLAGS.test_duration_seconds
output_audio = np.zeros((output_audio_sample_count,), dtype=np.float32)
# Set up background audio.
background_crossover_ms = 500
background_segment_duration_ms = (
FLAGS.clip_duration_ms + background_crossover_ms)
background_segment_duration_samples = int(
(background_segment_duration_ms * FLAGS.sample_rate) / 1000)
background_segment_stride_samples = int(
(FLAGS.clip_duration_ms * FLAGS.sample_rate) / 1000)
background_ramp_samples = int(
((background_crossover_ms / 2) * FLAGS.sample_rate) / 1000)
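  # Worked example with the default flags (clip_duration_ms=1000,
  # sample_rate=16000, background_crossover_ms=500): segment duration is
  # 24000 samples, stride is 16000 samples, and each ramp is 4000 samples,
  # so adjacent background segments overlap by 8000 samples and crossfade
  # over the ramps.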
# Mix the background audio into the main track.
how_many_backgrounds = int(
math.ceil(output_audio_sample_count / background_segment_stride_samples))
for i in range(how_many_backgrounds):
output_offset = int(i * background_segment_stride_samples)
background_index = np.random.randint(len(audio_processor.background_data))
background_samples = audio_processor.background_data[background_index]
background_offset = np.random.randint(
0, len(background_samples) - model_settings['desired_samples'])
background_volume = np.random.uniform(0, FLAGS.background_volume)
mix_in_audio_sample(output_audio, output_offset, background_samples,
background_offset, background_segment_duration_samples,
background_volume, background_ramp_samples,
background_ramp_samples)
# Mix the words into the main track, noting their labels and positions.
output_labels = []
word_stride_ms = FLAGS.clip_duration_ms + FLAGS.word_gap_ms
word_stride_samples = int((word_stride_ms * FLAGS.sample_rate) / 1000)
clip_duration_samples = int(
(FLAGS.clip_duration_ms * FLAGS.sample_rate) / 1000)
word_gap_samples = int((FLAGS.word_gap_ms * FLAGS.sample_rate) / 1000)
how_many_words = int(
math.floor(output_audio_sample_count / word_stride_samples))
all_test_data, all_test_labels = audio_processor.get_unprocessed_data(
-1, model_settings, 'testing')
for i in range(how_many_words):
output_offset = (
int(i * word_stride_samples) + np.random.randint(word_gap_samples))
output_offset_ms = (output_offset * 1000) / FLAGS.sample_rate
is_unknown = np.random.randint(100) < FLAGS.unknown_percentage
if is_unknown:
wanted_label = input_data.UNKNOWN_WORD_LABEL
else:
wanted_label = words_list[2 + np.random.randint(len(words_list) - 2)]
test_data_start = np.random.randint(len(all_test_data))
found_sample_data = None
index_lookup = np.arange(len(all_test_data), dtype=np.int32)
np.random.shuffle(index_lookup)
for test_data_offset in range(len(all_test_data)):
test_data_index = index_lookup[(
test_data_start + test_data_offset) % len(all_test_data)]
current_label = all_test_labels[test_data_index]
if current_label == wanted_label:
found_sample_data = all_test_data[test_data_index]
break
mix_in_audio_sample(output_audio, output_offset, found_sample_data, 0,
clip_duration_samples, 1.0, 500, 500)
output_labels.append({'label': wanted_label, 'time': output_offset_ms})
input_data.save_wav_file(FLAGS.output_audio_file, output_audio,
FLAGS.sample_rate)
tf.compat.v1.logging.info('Saved streaming test wav to %s',
FLAGS.output_audio_file)
with open(FLAGS.output_labels_file, 'w') as f:
for output_label in output_labels:
f.write('%s, %f\n' % (output_label['label'], output_label['time']))
tf.compat.v1.logging.info('Saved streaming test labels to %s',
FLAGS.output_labels_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--data_url',
type=str,
# pylint: disable=line-too-long
default='https://storage.googleapis.com/download.tensorflow.org/data/speech_commands_v0.01.tar.gz',
# pylint: enable=line-too-long
help='Location of speech training data')
parser.add_argument(
'--data_dir',
type=str,
default='/tmp/speech_dataset',
help="""\
Where to download the speech training data to.
""")
parser.add_argument(
'--background_dir',
type=str,
default='',
help="""\
Path to a directory of .wav files to mix in as background noise during training.
""")
parser.add_argument(
'--background_volume',
type=float,
default=0.1,
help="""\
How loud the background noise should be, between 0 and 1.
""")
parser.add_argument(
'--background_frequency',
type=float,
default=0.8,
help="""\
How many of the training samples have background noise mixed in.
""")
parser.add_argument(
'--silence_percentage',
type=float,
default=10.0,
help="""\
How much of the training data should be silence.
""")
parser.add_argument(
'--testing_percentage',
type=int,
default=10,
help='What percentage of wavs to use as a test set.')
parser.add_argument(
'--validation_percentage',
type=int,
default=10,
help='What percentage of wavs to use as a validation set.')
parser.add_argument(
'--sample_rate',
type=int,
default=16000,
help='Expected sample rate of the wavs.',)
parser.add_argument(
'--clip_duration_ms',
type=int,
default=1000,
help='Expected duration in milliseconds of the wavs.',)
parser.add_argument(
'--window_size_ms',
type=float,
default=30.0,
help='How long each spectrogram timeslice is',)
parser.add_argument(
'--window_stride_ms',
type=float,
default=10.0,
help='How long the stride is between spectrogram timeslices',)
parser.add_argument(
'--feature_bin_count',
type=int,
default=40,
help='How many bins to use for the MFCC fingerprint',
)
parser.add_argument(
'--wanted_words',
type=str,
default='yes,no,up,down,left,right,on,off,stop,go',
help='Words to use (others will be added to an unknown label)',)
parser.add_argument(
'--output_audio_file',
type=str,
default='/tmp/speech_commands_train/streaming_test.wav',
help='File to save the generated test audio to.')
parser.add_argument(
'--output_labels_file',
type=str,
default='/tmp/speech_commands_train/streaming_test_labels.txt',
help='File to save the generated test labels to.')
parser.add_argument(
'--test_duration_seconds',
type=int,
default=600,
help='How long the generated test audio file should be.',)
parser.add_argument(
'--word_gap_ms',
type=int,
default=2000,
help='How long the average gap should be between words.',)
parser.add_argument(
'--unknown_percentage',
type=int,
default=30,
help='What percentage of words should be unknown.')
FLAGS, unparsed = parser.parse_known_args()
tf.compat.v1.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/speech_commands/generate_streaming_test_wav.py
|
Python
|
apache-2.0
| 11,074
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for test file generation for speech commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.examples.speech_commands import generate_streaming_test_wav
from tensorflow.python.platform import test
class GenerateStreamingTestWavTest(test.TestCase):
def testMixInAudioSample(self):
track_data = np.zeros([10000])
sample_data = np.ones([1000])
generate_streaming_test_wav.mix_in_audio_sample(
track_data, 2000, sample_data, 0, 1000, 1.0, 100, 100)
self.assertNear(1.0, track_data[2500], 0.0001)
self.assertNear(0.0, track_data[3500], 0.0001)
if __name__ == "__main__":
test.main()
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/speech_commands/generate_streaming_test_wav_test.py
|
Python
|
apache-2.0
| 1,416
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model definitions for simple speech recognition.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import math
import os.path
import random
import re
import sys
import tarfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.ops import gen_audio_ops as audio_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
tf.compat.v1.disable_eager_execution()
# If it's available, load the specialized feature generator. If this doesn't
# work, try building with bazel instead of running the Python script directly.
try:
from tensorflow.lite.experimental.microfrontend.python.ops import audio_microfrontend_op as frontend_op # pylint:disable=g-import-not-at-top
except ImportError:
frontend_op = None
MAX_NUM_WAVS_PER_CLASS = 2**27 - 1 # ~134M
SILENCE_LABEL = '_silence_'
SILENCE_INDEX = 0
UNKNOWN_WORD_LABEL = '_unknown_'
UNKNOWN_WORD_INDEX = 1
BACKGROUND_NOISE_DIR_NAME = '_background_noise_'
RANDOM_SEED = 59185
def prepare_words_list(wanted_words):
"""Prepends common tokens to the custom word list.
Args:
wanted_words: List of strings containing the custom words.
Returns:
List with the standard silence and unknown tokens added.
"""
return [SILENCE_LABEL, UNKNOWN_WORD_LABEL] + wanted_words
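# For example, prepare_words_list(['yes', 'no']) returns
# ['_silence_', '_unknown_', 'yes', 'no'], so custom words start at index 2.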
def which_set(filename, validation_percentage, testing_percentage):
"""Determines which data partition the file should belong to.
We want to keep files in the same training, validation, or testing sets even
  if new ones are added over time. This makes it less likely that testing
  samples will accidentally be reused in training when long runs are
  restarted, for example. To keep this stability, a hash of the filename is
  taken and used
to determine which set it should belong to. This determination only depends on
the name and the set proportions, so it won't change as other files are added.
It's also useful to associate particular files as related (for example words
spoken by the same person), so anything after '_nohash_' in a filename is
ignored for set determination. This ensures that 'bobby_nohash_0.wav' and
'bobby_nohash_1.wav' are always in the same set, for example.
Args:
filename: File path of the data sample.
validation_percentage: How much of the data set to use for validation.
testing_percentage: How much of the data set to use for testing.
Returns:
String, one of 'training', 'validation', or 'testing'.
"""
base_name = os.path.basename(filename)
# We want to ignore anything after '_nohash_' in the file name when
# deciding which set to put a wav in, so the data set creator has a way of
# grouping wavs that are close variations of each other.
hash_name = re.sub(r'_nohash_.*$', '', base_name)
# This looks a bit magical, but we need to decide whether this file should
# go into the training, testing, or validation sets, and we want to keep
# existing files in the same set even if more files are subsequently
# added.
# To do that, we need a stable way of deciding based on just the file name
# itself, so we do a hash of that and then use that to generate a
# probability value that we use to assign it.
hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()
percentage_hash = ((int(hash_name_hashed, 16) %
(MAX_NUM_WAVS_PER_CLASS + 1)) *
(100.0 / MAX_NUM_WAVS_PER_CLASS))
if percentage_hash < validation_percentage:
result = 'validation'
elif percentage_hash < (testing_percentage + validation_percentage):
result = 'testing'
else:
result = 'training'
return result
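# Illustrative property of which_set (hypothetical filenames): the assignment
# is a pure function of the filename and the split percentages, and '_nohash_'
# suffixes are ignored, so these two calls always agree:
#
#   which_set('bobby_nohash_0.wav', 10, 10) == which_set('bobby_nohash_1.wav', 10, 10)
#   # Both always return the same one of 'training'/'validation'/'testing'.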
def load_wav_file(filename):
"""Loads an audio file and returns a float PCM-encoded array of samples.
Args:
filename: Path to the .wav file to load.
Returns:
Numpy array holding the sample data as floats between -1.0 and 1.0.
"""
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
wav_filename_placeholder = tf.compat.v1.placeholder(tf.string, [])
wav_loader = io_ops.read_file(wav_filename_placeholder)
wav_decoder = tf.audio.decode_wav(wav_loader, desired_channels=1)
return sess.run(
wav_decoder,
feed_dict={wav_filename_placeholder: filename}).audio.flatten()
def save_wav_file(filename, wav_data, sample_rate):
"""Saves audio sample data to a .wav audio file.
Args:
filename: Path to save the file to.
wav_data: 2D array of float PCM-encoded audio data.
sample_rate: Samples per second to encode in the file.
"""
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
wav_filename_placeholder = tf.compat.v1.placeholder(tf.string, [])
sample_rate_placeholder = tf.compat.v1.placeholder(tf.int32, [])
wav_data_placeholder = tf.compat.v1.placeholder(tf.float32, [None, 1])
wav_encoder = tf.audio.encode_wav(wav_data_placeholder,
sample_rate_placeholder)
wav_saver = io_ops.write_file(wav_filename_placeholder, wav_encoder)
sess.run(
wav_saver,
feed_dict={
wav_filename_placeholder: filename,
sample_rate_placeholder: sample_rate,
wav_data_placeholder: np.reshape(wav_data, (-1, 1))
})
def get_features_range(model_settings):
"""Returns the expected min/max for generated features.
Args:
model_settings: Information about the current model being trained.
Returns:
Min/max float pair holding the range of features.
Raises:
Exception: If preprocessing mode isn't recognized.
"""
# TODO(petewarden): These values have been derived from the observed ranges
# of spectrogram and MFCC inputs. If the preprocessing pipeline changes,
# they may need to be updated.
if model_settings['preprocess'] == 'average':
features_min = 0.0
features_max = 127.5
elif model_settings['preprocess'] == 'mfcc':
features_min = -247.0
features_max = 30.0
elif model_settings['preprocess'] == 'micro':
features_min = 0.0
features_max = 26.0
else:
raise Exception('Unknown preprocess mode "%s" (should be "mfcc",'
' "average", or "micro")' % (model_settings['preprocess']))
return features_min, features_max
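# For example, get_features_range({'preprocess': 'micro'}) returns (0.0, 26.0);
# callers can use these bounds to scale or quantize feature values.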
class AudioProcessor(object):
"""Handles loading, partitioning, and preparing audio training data."""
def __init__(self, data_url, data_dir, silence_percentage, unknown_percentage,
wanted_words, validation_percentage, testing_percentage,
model_settings, summaries_dir):
if data_dir:
self.data_dir = data_dir
self.maybe_download_and_extract_dataset(data_url, data_dir)
self.prepare_data_index(silence_percentage, unknown_percentage,
wanted_words, validation_percentage,
testing_percentage)
self.prepare_background_data()
self.prepare_processing_graph(model_settings, summaries_dir)
def maybe_download_and_extract_dataset(self, data_url, dest_directory):
"""Download and extract data set tar file.
If the data set we're using doesn't already exist, this function
downloads it from the TensorFlow.org website and unpacks it into a
directory.
    If the data_url is None, don't download anything and expect the data
    directory to contain the correct files already.
Args:
data_url: Web location of the tar file containing the data set.
dest_directory: File path to extract data to.
"""
if not data_url:
return
if not gfile.Exists(dest_directory):
os.makedirs(dest_directory)
filename = data_url.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not gfile.Exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write(
'\r>> Downloading %s %.1f%%' %
(filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
try:
filepath, _ = urllib.request.urlretrieve(data_url, filepath, _progress)
except:
tf.compat.v1.logging.error(
'Failed to download URL: {0} to folder: {1}. Please make sure you '
'have enough free space and an internet connection'.format(
data_url, filepath))
raise
print()
statinfo = os.stat(filepath)
tf.compat.v1.logging.info(
'Successfully downloaded {0} ({1} bytes)'.format(
filename, statinfo.st_size))
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def prepare_data_index(self, silence_percentage, unknown_percentage,
wanted_words, validation_percentage,
testing_percentage):
"""Prepares a list of the samples organized by set and label.
The training loop needs a list of all the available data, organized by
which partition it should belong to, and with ground truth labels attached.
    This function analyzes the folders below the `data_dir`, figures out the
    right labels for each file based on the name of the subdirectory it belongs
    to, and uses a stable hash to assign it to a data set partition.
Args:
silence_percentage: How much of the resulting data should be background.
unknown_percentage: How much should be audio outside the wanted classes.
wanted_words: Labels of the classes we want to be able to recognize.
validation_percentage: How much of the data set to use for validation.
testing_percentage: How much of the data set to use for testing.
Returns:
Dictionary containing a list of file information for each set partition,
and a lookup map for each class to determine its numeric index.
Raises:
Exception: If expected files are not found.
"""
# Make sure the shuffling and picking of unknowns is deterministic.
random.seed(RANDOM_SEED)
wanted_words_index = {}
for index, wanted_word in enumerate(wanted_words):
wanted_words_index[wanted_word] = index + 2
self.data_index = {'validation': [], 'testing': [], 'training': []}
unknown_index = {'validation': [], 'testing': [], 'training': []}
all_words = {}
# Look through all the subfolders to find audio samples
search_path = os.path.join(self.data_dir, '*', '*.wav')
for wav_path in gfile.Glob(search_path):
_, word = os.path.split(os.path.dirname(wav_path))
word = word.lower()
# Treat the '_background_noise_' folder as a special case, since we expect
# it to contain long audio samples we mix in to improve training.
if word == BACKGROUND_NOISE_DIR_NAME:
continue
all_words[word] = True
set_index = which_set(wav_path, validation_percentage, testing_percentage)
# If it's a known class, store its detail, otherwise add it to the list
# we'll use to train the unknown label.
if word in wanted_words_index:
self.data_index[set_index].append({'label': word, 'file': wav_path})
else:
unknown_index[set_index].append({'label': word, 'file': wav_path})
if not all_words:
raise Exception('No .wavs found at ' + search_path)
for index, wanted_word in enumerate(wanted_words):
if wanted_word not in all_words:
raise Exception('Expected to find ' + wanted_word +
' in labels but only found ' +
', '.join(all_words.keys()))
# We need an arbitrary file to load as the input for the silence samples.
# It's multiplied by zero later, so the content doesn't matter.
silence_wav_path = self.data_index['training'][0]['file']
for set_index in ['validation', 'testing', 'training']:
set_size = len(self.data_index[set_index])
silence_size = int(math.ceil(set_size * silence_percentage / 100))
for _ in range(silence_size):
self.data_index[set_index].append({
'label': SILENCE_LABEL,
'file': silence_wav_path
})
# Pick some unknowns to add to each partition of the data set.
random.shuffle(unknown_index[set_index])
unknown_size = int(math.ceil(set_size * unknown_percentage / 100))
self.data_index[set_index].extend(unknown_index[set_index][:unknown_size])
# Make sure the ordering is random.
for set_index in ['validation', 'testing', 'training']:
random.shuffle(self.data_index[set_index])
# Prepare the rest of the result data structure.
self.words_list = prepare_words_list(wanted_words)
self.word_to_index = {}
for word in all_words:
if word in wanted_words_index:
self.word_to_index[word] = wanted_words_index[word]
else:
self.word_to_index[word] = UNKNOWN_WORD_INDEX
self.word_to_index[SILENCE_LABEL] = SILENCE_INDEX
def prepare_background_data(self):
"""Searches a folder for background noise audio, and loads it into memory.
It's expected that the background audio samples will be in a subdirectory
named '_background_noise_' inside the 'data_dir' folder, as .wavs that match
the sample rate of the training data, but can be much longer in duration.
    If the '_background_noise_' folder doesn't exist at all, this isn't an
    error; it's just taken to mean that no background noise augmentation should
be used. If the folder does exist, but it's empty, that's treated as an
error.
Returns:
List of raw PCM-encoded audio samples of background noise.
Raises:
Exception: If files aren't found in the folder.
"""
self.background_data = []
background_dir = os.path.join(self.data_dir, BACKGROUND_NOISE_DIR_NAME)
if not gfile.Exists(background_dir):
return self.background_data
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
wav_filename_placeholder = tf.compat.v1.placeholder(tf.string, [])
wav_loader = io_ops.read_file(wav_filename_placeholder)
wav_decoder = tf.audio.decode_wav(wav_loader, desired_channels=1)
search_path = os.path.join(self.data_dir, BACKGROUND_NOISE_DIR_NAME,
'*.wav')
for wav_path in gfile.Glob(search_path):
wav_data = sess.run(
wav_decoder,
feed_dict={wav_filename_placeholder: wav_path}).audio.flatten()
self.background_data.append(wav_data)
if not self.background_data:
raise Exception('No background wav files were found in ' + search_path)
def prepare_processing_graph(self, model_settings, summaries_dir):
"""Builds a TensorFlow graph to apply the input distortions.
Creates a graph that loads a WAVE file, decodes it, scales the volume,
shifts it in time, adds in background noise, calculates a spectrogram, and
then builds an MFCC fingerprint from that.
    This must be called with an active TensorFlow session running, and it
    creates several placeholder inputs and one output:
- wav_filename_placeholder_: Filename of the WAV to load.
- foreground_volume_placeholder_: How loud the main clip should be.
- time_shift_padding_placeholder_: Where to pad the clip.
- time_shift_offset_placeholder_: How much to move the clip in time.
- background_data_placeholder_: PCM sample data for background noise.
- background_volume_placeholder_: Loudness of mixed-in background.
- output_: Output 2D fingerprint of processed audio.
Args:
model_settings: Information about the current model being trained.
summaries_dir: Path to save training summary information to.
Raises:
ValueError: If the preprocessing mode isn't recognized.
Exception: If the preprocessor wasn't compiled in.
"""
with tf.compat.v1.get_default_graph().name_scope('data'):
desired_samples = model_settings['desired_samples']
self.wav_filename_placeholder_ = tf.compat.v1.placeholder(
tf.string, [], name='wav_filename')
wav_loader = io_ops.read_file(self.wav_filename_placeholder_)
wav_decoder = tf.audio.decode_wav(
wav_loader, desired_channels=1, desired_samples=desired_samples)
# Allow the audio sample's volume to be adjusted.
self.foreground_volume_placeholder_ = tf.compat.v1.placeholder(
tf.float32, [], name='foreground_volume')
scaled_foreground = tf.multiply(wav_decoder.audio,
self.foreground_volume_placeholder_)
# Shift the sample's start position, and pad any gaps with zeros.
self.time_shift_padding_placeholder_ = tf.compat.v1.placeholder(
tf.int32, [2, 2], name='time_shift_padding')
self.time_shift_offset_placeholder_ = tf.compat.v1.placeholder(
tf.int32, [2], name='time_shift_offset')
padded_foreground = tf.pad(
tensor=scaled_foreground,
paddings=self.time_shift_padding_placeholder_,
mode='CONSTANT')
sliced_foreground = tf.slice(padded_foreground,
self.time_shift_offset_placeholder_,
[desired_samples, -1])
# Mix in background noise.
self.background_data_placeholder_ = tf.compat.v1.placeholder(
tf.float32, [desired_samples, 1], name='background_data')
self.background_volume_placeholder_ = tf.compat.v1.placeholder(
tf.float32, [], name='background_volume')
background_mul = tf.multiply(self.background_data_placeholder_,
self.background_volume_placeholder_)
background_add = tf.add(background_mul, sliced_foreground)
background_clamp = tf.clip_by_value(background_add, -1.0, 1.0)
# Run the spectrogram and MFCC ops to get a 2D 'fingerprint' of the audio.
spectrogram = audio_ops.audio_spectrogram(
background_clamp,
window_size=model_settings['window_size_samples'],
stride=model_settings['window_stride_samples'],
magnitude_squared=True)
tf.compat.v1.summary.image(
'spectrogram', tf.expand_dims(spectrogram, -1), max_outputs=1)
# The number of buckets in each FFT row in the spectrogram will depend on
# how many input samples there are in each window. This can be quite
# large, with a 160 sample window producing 127 buckets for example. We
# don't need this level of detail for classification, so we often want to
# shrink them down to produce a smaller result. That's what this section
# implements. One method is to use average pooling to merge adjacent
# buckets, but a more sophisticated approach is to apply the MFCC
# algorithm to shrink the representation.
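      # Rough arithmetic sketch, using the example numbers above and assuming
      # average_window_width is 6: with 'SAME' pooling, a row of 127 buckets
      # shrinks to ceil(127 / 6) = 22 features per timeslice.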
if model_settings['preprocess'] == 'average':
self.output_ = tf.nn.pool(
input=tf.expand_dims(spectrogram, -1),
window_shape=[1, model_settings['average_window_width']],
strides=[1, model_settings['average_window_width']],
pooling_type='AVG',
padding='SAME')
tf.compat.v1.summary.image('shrunk_spectrogram',
self.output_,
max_outputs=1)
elif model_settings['preprocess'] == 'mfcc':
self.output_ = audio_ops.mfcc(
spectrogram,
wav_decoder.sample_rate,
dct_coefficient_count=model_settings['fingerprint_width'])
tf.compat.v1.summary.image(
'mfcc', tf.expand_dims(self.output_, -1), max_outputs=1)
elif model_settings['preprocess'] == 'micro':
if not frontend_op:
raise Exception(
'Micro frontend op is currently not available when running'
' TensorFlow directly from Python, you need to build and run'
' through Bazel')
sample_rate = model_settings['sample_rate']
window_size_ms = (model_settings['window_size_samples'] *
1000) / sample_rate
window_step_ms = (model_settings['window_stride_samples'] *
1000) / sample_rate
int16_input = tf.cast(tf.multiply(background_clamp, 32768), tf.int16)
micro_frontend = frontend_op.audio_microfrontend(
int16_input,
sample_rate=sample_rate,
window_size=window_size_ms,
window_step=window_step_ms,
num_channels=model_settings['fingerprint_width'],
out_scale=1,
out_type=tf.float32)
self.output_ = tf.multiply(micro_frontend, (10.0 / 256.0))
tf.compat.v1.summary.image(
'micro',
tf.expand_dims(tf.expand_dims(self.output_, -1), 0),
max_outputs=1)
else:
raise ValueError('Unknown preprocess mode "%s" (should be "mfcc", '
' "average", or "micro")' %
(model_settings['preprocess']))
# Merge all the summaries and write them out to /tmp/retrain_logs (by
# default)
self.merged_summaries_ = tf.compat.v1.summary.merge_all(scope='data')
if summaries_dir:
self.summary_writer_ = tf.compat.v1.summary.FileWriter(
summaries_dir + '/data', tf.compat.v1.get_default_graph())
def set_size(self, mode):
"""Calculates the number of samples in the dataset partition.
Args:
mode: Which partition, must be 'training', 'validation', or 'testing'.
Returns:
Number of samples in the partition.
"""
return len(self.data_index[mode])
def get_data(self, how_many, offset, model_settings, background_frequency,
background_volume_range, time_shift, mode, sess):
"""Gather samples from the data set, applying transformations as needed.
    When the mode is 'training', a random selection of samples will be
    returned; otherwise the first N clips in the partition will be used. This
    ensures that validation always uses the same samples, reducing noise in
    the metrics.
Args:
how_many: Desired number of samples to return. -1 means the entire
contents of this partition.
offset: Where to start when fetching deterministically.
model_settings: Information about the current model being trained.
background_frequency: How many clips will have background noise, 0.0 to
1.0.
background_volume_range: How loud the background noise will be.
time_shift: How much to randomly shift the clips by in time.
mode: Which partition to use, must be 'training', 'validation', or
'testing'.
sess: TensorFlow session that was active when processor was created.
Returns:
List of sample data for the transformed samples, and list of label indexes
Raises:
ValueError: If background samples are too short.
"""
# Pick one of the partitions to choose samples from.
candidates = self.data_index[mode]
if how_many == -1:
sample_count = len(candidates)
else:
sample_count = max(0, min(how_many, len(candidates) - offset))
# Data and labels will be populated and returned.
data = np.zeros((sample_count, model_settings['fingerprint_size']))
labels = np.zeros(sample_count)
desired_samples = model_settings['desired_samples']
use_background = self.background_data and (mode == 'training')
pick_deterministically = (mode != 'training')
    # Use the processing graph we created earlier to repeatedly generate the
    # final output sample data we'll use in training.
for i in xrange(offset, offset + sample_count):
# Pick which audio sample to use.
if how_many == -1 or pick_deterministically:
sample_index = i
else:
sample_index = np.random.randint(len(candidates))
sample = candidates[sample_index]
# If we're time shifting, set up the offset for this sample.
if time_shift > 0:
time_shift_amount = np.random.randint(-time_shift, time_shift)
else:
time_shift_amount = 0
if time_shift_amount > 0:
time_shift_padding = [[time_shift_amount, 0], [0, 0]]
time_shift_offset = [0, 0]
else:
time_shift_padding = [[0, -time_shift_amount], [0, 0]]
time_shift_offset = [-time_shift_amount, 0]
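      # E.g. a time_shift_amount of +100 pads 100 zeros at the front (the
      # audio plays later); -100 pads 100 zeros at the end and slices from
      # offset 100 (the audio plays earlier).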
input_dict = {
self.wav_filename_placeholder_: sample['file'],
self.time_shift_padding_placeholder_: time_shift_padding,
self.time_shift_offset_placeholder_: time_shift_offset,
}
# Choose a section of background noise to mix in.
if use_background or sample['label'] == SILENCE_LABEL:
background_index = np.random.randint(len(self.background_data))
background_samples = self.background_data[background_index]
if len(background_samples) <= model_settings['desired_samples']:
raise ValueError(
'Background sample is too short! Need more than %d'
' samples but only %d were found' %
(model_settings['desired_samples'], len(background_samples)))
background_offset = np.random.randint(
0, len(background_samples) - model_settings['desired_samples'])
background_clipped = background_samples[background_offset:(
background_offset + desired_samples)]
background_reshaped = background_clipped.reshape([desired_samples, 1])
if sample['label'] == SILENCE_LABEL:
background_volume = np.random.uniform(0, 1)
elif np.random.uniform(0, 1) < background_frequency:
background_volume = np.random.uniform(0, background_volume_range)
else:
background_volume = 0
else:
background_reshaped = np.zeros([desired_samples, 1])
background_volume = 0
input_dict[self.background_data_placeholder_] = background_reshaped
input_dict[self.background_volume_placeholder_] = background_volume
# If we want silence, mute out the main sample but leave the background.
if sample['label'] == SILENCE_LABEL:
input_dict[self.foreground_volume_placeholder_] = 0
else:
input_dict[self.foreground_volume_placeholder_] = 1
# Run the graph to produce the output audio.
summary, data_tensor = sess.run(
[self.merged_summaries_, self.output_], feed_dict=input_dict)
self.summary_writer_.add_summary(summary)
data[i - offset, :] = data_tensor.flatten()
label_index = self.word_to_index[sample['label']]
labels[i - offset] = label_index
return data, labels
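  # Illustrative usage of get_data (mirrors the unit tests; the values are
  # arbitrary):
  #   data, labels = audio_processor.get_data(
  #       10, 0, model_settings, 0.3, 0.1, 100, 'training', sess)
  #   # data has shape (10, model_settings['fingerprint_size']).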
def get_features_for_wav(self, wav_filename, model_settings, sess):
"""Applies the feature transformation process to the input_wav.
Runs the feature generation process (generally producing a spectrogram from
the input samples) on the WAV file. This can be useful for testing and
verifying implementations being run on other platforms.
Args:
wav_filename: The path to the input audio file.
model_settings: Information about the current model being trained.
sess: TensorFlow session that was active when processor was created.
Returns:
Numpy data array containing the generated features.
"""
desired_samples = model_settings['desired_samples']
input_dict = {
self.wav_filename_placeholder_: wav_filename,
self.time_shift_padding_placeholder_: [[0, 0], [0, 0]],
self.time_shift_offset_placeholder_: [0, 0],
self.background_data_placeholder_: np.zeros([desired_samples, 1]),
self.background_volume_placeholder_: 0,
self.foreground_volume_placeholder_: 1,
}
# Run the graph to produce the output audio.
data_tensor = sess.run([self.output_], feed_dict=input_dict)
return data_tensor
def get_unprocessed_data(self, how_many, model_settings, mode):
"""Retrieve sample data for the given partition, with no transformations.
Args:
how_many: Desired number of samples to return. -1 means the entire
contents of this partition.
model_settings: Information about the current model being trained.
mode: Which partition to use, must be 'training', 'validation', or
'testing'.
Returns:
List of sample data for the samples, and list of labels in one-hot form.
"""
candidates = self.data_index[mode]
if how_many == -1:
sample_count = len(candidates)
else:
sample_count = how_many
desired_samples = model_settings['desired_samples']
words_list = self.words_list
data = np.zeros((sample_count, desired_samples))
labels = []
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
wav_filename_placeholder = tf.compat.v1.placeholder(tf.string, [])
wav_loader = io_ops.read_file(wav_filename_placeholder)
wav_decoder = tf.audio.decode_wav(
wav_loader, desired_channels=1, desired_samples=desired_samples)
foreground_volume_placeholder = tf.compat.v1.placeholder(tf.float32, [])
scaled_foreground = tf.multiply(wav_decoder.audio,
foreground_volume_placeholder)
for i in range(sample_count):
if how_many == -1:
sample_index = i
else:
sample_index = np.random.randint(len(candidates))
sample = candidates[sample_index]
input_dict = {wav_filename_placeholder: sample['file']}
if sample['label'] == SILENCE_LABEL:
input_dict[foreground_volume_placeholder] = 0
else:
input_dict[foreground_volume_placeholder] = 1
data[i, :] = sess.run(scaled_foreground, feed_dict=input_dict).flatten()
label_index = self.word_to_index[sample['label']]
labels.append(words_list[label_index])
return data, labels
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/speech_commands/input_data.py
|
Python
|
apache-2.0
| 30,509
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for data input for speech commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
from tensorflow.examples.speech_commands import input_data
from tensorflow.examples.speech_commands import models
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class InputDataTest(test.TestCase):
def _getWavData(self):
with self.cached_session():
sample_data = tf.zeros([32000, 2])
wav_encoder = tf.audio.encode_wav(sample_data, 16000)
wav_data = self.evaluate(wav_encoder)
return wav_data
def _saveTestWavFile(self, filename, wav_data):
with open(filename, "wb") as f:
f.write(wav_data)
def _saveWavFolders(self, root_dir, labels, how_many):
wav_data = self._getWavData()
for label in labels:
dir_name = os.path.join(root_dir, label)
os.mkdir(dir_name)
for i in range(how_many):
file_path = os.path.join(dir_name, "some_audio_%d.wav" % i)
self._saveTestWavFile(file_path, wav_data)
def _model_settings(self):
return {
"desired_samples": 160,
"fingerprint_size": 40,
"label_count": 4,
"window_size_samples": 100,
"window_stride_samples": 100,
"fingerprint_width": 40,
"preprocess": "mfcc",
}
def _runGetDataTest(self, preprocess, window_length_ms):
tmp_dir = self.get_temp_dir()
wav_dir = os.path.join(tmp_dir, "wavs")
os.mkdir(wav_dir)
self._saveWavFolders(wav_dir, ["a", "b", "c"], 100)
background_dir = os.path.join(wav_dir, "_background_noise_")
os.mkdir(background_dir)
wav_data = self._getWavData()
for i in range(10):
file_path = os.path.join(background_dir, "background_audio_%d.wav" % i)
self._saveTestWavFile(file_path, wav_data)
model_settings = models.prepare_model_settings(
4, 16000, 1000, window_length_ms, 20, 40, preprocess)
with self.cached_session() as sess:
audio_processor = input_data.AudioProcessor(
"", wav_dir, 10, 10, ["a", "b"], 10, 10, model_settings, tmp_dir)
result_data, result_labels = audio_processor.get_data(
10, 0, model_settings, 0.3, 0.1, 100, "training", sess)
self.assertEqual(10, len(result_data))
self.assertEqual(10, len(result_labels))
def testPrepareWordsList(self):
words_list = ["a", "b"]
self.assertGreater(
len(input_data.prepare_words_list(words_list)), len(words_list))
def testWhichSet(self):
self.assertEqual(
input_data.which_set("foo.wav", 10, 10),
input_data.which_set("foo.wav", 10, 10))
self.assertEqual(
input_data.which_set("foo_nohash_0.wav", 10, 10),
input_data.which_set("foo_nohash_1.wav", 10, 10))
@test_util.run_deprecated_v1
def testPrepareDataIndex(self):
tmp_dir = self.get_temp_dir()
self._saveWavFolders(tmp_dir, ["a", "b", "c"], 100)
audio_processor = input_data.AudioProcessor("", tmp_dir, 10, 10,
["a", "b"], 10, 10,
self._model_settings(), tmp_dir)
self.assertLess(0, audio_processor.set_size("training"))
self.assertIn("training", audio_processor.data_index)
self.assertIn("validation", audio_processor.data_index)
self.assertIn("testing", audio_processor.data_index)
self.assertEqual(input_data.UNKNOWN_WORD_INDEX,
audio_processor.word_to_index["c"])
def testPrepareDataIndexEmpty(self):
tmp_dir = self.get_temp_dir()
self._saveWavFolders(tmp_dir, ["a", "b", "c"], 0)
with self.assertRaises(Exception) as e:
_ = input_data.AudioProcessor("", tmp_dir, 10, 10, ["a", "b"], 10, 10,
self._model_settings(), tmp_dir)
self.assertIn("No .wavs found", str(e.exception))
def testPrepareDataIndexMissing(self):
tmp_dir = self.get_temp_dir()
self._saveWavFolders(tmp_dir, ["a", "b", "c"], 100)
with self.assertRaises(Exception) as e:
_ = input_data.AudioProcessor("", tmp_dir, 10, 10, ["a", "b", "d"], 10,
10, self._model_settings(), tmp_dir)
self.assertIn("Expected to find", str(e.exception))
@test_util.run_deprecated_v1
def testPrepareBackgroundData(self):
tmp_dir = self.get_temp_dir()
background_dir = os.path.join(tmp_dir, "_background_noise_")
os.mkdir(background_dir)
wav_data = self._getWavData()
for i in range(10):
file_path = os.path.join(background_dir, "background_audio_%d.wav" % i)
self._saveTestWavFile(file_path, wav_data)
self._saveWavFolders(tmp_dir, ["a", "b", "c"], 100)
audio_processor = input_data.AudioProcessor("", tmp_dir, 10, 10,
["a", "b"], 10, 10,
self._model_settings(), tmp_dir)
self.assertEqual(10, len(audio_processor.background_data))
def testLoadWavFile(self):
tmp_dir = self.get_temp_dir()
file_path = os.path.join(tmp_dir, "load_test.wav")
wav_data = self._getWavData()
self._saveTestWavFile(file_path, wav_data)
sample_data = input_data.load_wav_file(file_path)
self.assertIsNotNone(sample_data)
def testSaveWavFile(self):
tmp_dir = self.get_temp_dir()
file_path = os.path.join(tmp_dir, "load_test.wav")
save_data = np.zeros([16000, 1])
input_data.save_wav_file(file_path, save_data, 16000)
loaded_data = input_data.load_wav_file(file_path)
self.assertIsNotNone(loaded_data)
self.assertEqual(16000, len(loaded_data))
@test_util.run_deprecated_v1
def testPrepareProcessingGraph(self):
tmp_dir = self.get_temp_dir()
wav_dir = os.path.join(tmp_dir, "wavs")
os.mkdir(wav_dir)
self._saveWavFolders(wav_dir, ["a", "b", "c"], 100)
background_dir = os.path.join(wav_dir, "_background_noise_")
os.mkdir(background_dir)
wav_data = self._getWavData()
for i in range(10):
file_path = os.path.join(background_dir, "background_audio_%d.wav" % i)
self._saveTestWavFile(file_path, wav_data)
model_settings = {
"desired_samples": 160,
"fingerprint_size": 40,
"label_count": 4,
"window_size_samples": 100,
"window_stride_samples": 100,
"fingerprint_width": 40,
"preprocess": "mfcc",
}
audio_processor = input_data.AudioProcessor("", wav_dir, 10, 10, ["a", "b"],
10, 10, model_settings, tmp_dir)
self.assertIsNotNone(audio_processor.wav_filename_placeholder_)
self.assertIsNotNone(audio_processor.foreground_volume_placeholder_)
self.assertIsNotNone(audio_processor.time_shift_padding_placeholder_)
self.assertIsNotNone(audio_processor.time_shift_offset_placeholder_)
self.assertIsNotNone(audio_processor.background_data_placeholder_)
self.assertIsNotNone(audio_processor.background_volume_placeholder_)
self.assertIsNotNone(audio_processor.output_)
@test_util.run_deprecated_v1
def testGetDataAverage(self):
self._runGetDataTest("average", 10)
@test_util.run_deprecated_v1
def testGetDataAverageLongWindow(self):
self._runGetDataTest("average", 30)
@test_util.run_deprecated_v1
def testGetDataMfcc(self):
self._runGetDataTest("mfcc", 30)
@test_util.run_deprecated_v1
def testGetDataMicro(self):
self._runGetDataTest("micro", 20)
@test_util.run_deprecated_v1
def testGetUnprocessedData(self):
tmp_dir = self.get_temp_dir()
wav_dir = os.path.join(tmp_dir, "wavs")
os.mkdir(wav_dir)
self._saveWavFolders(wav_dir, ["a", "b", "c"], 100)
model_settings = {
"desired_samples": 160,
"fingerprint_size": 40,
"label_count": 4,
"window_size_samples": 100,
"window_stride_samples": 100,
"fingerprint_width": 40,
"preprocess": "mfcc",
}
audio_processor = input_data.AudioProcessor("", wav_dir, 10, 10, ["a", "b"],
10, 10, model_settings, tmp_dir)
result_data, result_labels = audio_processor.get_unprocessed_data(
10, model_settings, "training")
self.assertEqual(10, len(result_data))
self.assertEqual(10, len(result_labels))
@test_util.run_deprecated_v1
def testGetFeaturesForWav(self):
tmp_dir = self.get_temp_dir()
wav_dir = os.path.join(tmp_dir, "wavs")
os.mkdir(wav_dir)
self._saveWavFolders(wav_dir, ["a", "b", "c"], 1)
desired_samples = 1600
model_settings = {
"desired_samples": desired_samples,
"fingerprint_size": 40,
"label_count": 4,
"window_size_samples": 100,
"window_stride_samples": 100,
"fingerprint_width": 40,
"average_window_width": 6,
"preprocess": "average",
}
with self.cached_session() as sess:
audio_processor = input_data.AudioProcessor(
"", wav_dir, 10, 10, ["a", "b"], 10, 10, model_settings, tmp_dir)
sample_data = np.zeros([desired_samples, 1])
for i in range(desired_samples):
phase = i % 4
if phase == 0:
sample_data[i, 0] = 0
elif phase == 1:
sample_data[i, 0] = -1
elif phase == 2:
sample_data[i, 0] = 0
elif phase == 3:
sample_data[i, 0] = 1
test_wav_path = os.path.join(tmp_dir, "test_wav.wav")
input_data.save_wav_file(test_wav_path, sample_data, 16000)
results = audio_processor.get_features_for_wav(test_wav_path,
model_settings, sess)
spectrogram = results[0]
self.assertEqual(1, spectrogram.shape[0])
self.assertEqual(16, spectrogram.shape[1])
self.assertEqual(11, spectrogram.shape[2])
self.assertNear(0, spectrogram[0, 0, 0], 0.1)
self.assertNear(200, spectrogram[0, 0, 5], 0.1)
def testGetFeaturesRange(self):
model_settings = {
"preprocess": "average",
}
features_min, _ = input_data.get_features_range(model_settings)
self.assertNear(0.0, features_min, 1e-5)
def testGetMfccFeaturesRange(self):
model_settings = {
"preprocess": "mfcc",
}
features_min, features_max = input_data.get_features_range(model_settings)
self.assertLess(features_min, features_max)
if __name__ == "__main__":
test.main()
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/speech_commands/input_data_test.py
|
Python
|
apache-2.0
| 11,134
|
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <fstream>
#include <vector>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/command_line_flags.h"
// These are all common classes it's handy to reference with no namespace.
using tensorflow::Flag;
using tensorflow::int32;
using tensorflow::Status;
using tensorflow::string;
using tensorflow::Tensor;
using tensorflow::tstring;
namespace {
// Reads a model graph definition from disk, and creates a session object you
// can use to run it.
Status LoadGraph(const string& graph_file_name,
std::unique_ptr<tensorflow::Session>* session) {
tensorflow::GraphDef graph_def;
Status load_graph_status =
ReadBinaryProto(tensorflow::Env::Default(), graph_file_name, &graph_def);
if (!load_graph_status.ok()) {
return tensorflow::errors::NotFound("Failed to load compute graph at '",
graph_file_name, "'");
}
session->reset(tensorflow::NewSession(tensorflow::SessionOptions()));
Status session_create_status = (*session)->Create(graph_def);
if (!session_create_status.ok()) {
return session_create_status;
}
return Status::OK();
}
// Takes a file name, and loads a list of labels from it, one per line, and
// returns a vector of the strings.
Status ReadLabelsFile(const string& file_name, std::vector<string>* result) {
std::ifstream file(file_name);
if (!file) {
return tensorflow::errors::NotFound("Labels file ", file_name,
" not found.");
}
result->clear();
string line;
while (std::getline(file, line)) {
result->push_back(line);
}
return Status::OK();
}
// Analyzes the output of the graph to retrieve the highest scores and
// their positions in the tensor.
void GetTopLabels(const std::vector<Tensor>& outputs, int how_many_labels,
Tensor* out_indices, Tensor* out_scores) {
const Tensor& unsorted_scores_tensor = outputs[0];
auto unsorted_scores_flat = unsorted_scores_tensor.flat<float>();
std::vector<std::pair<int, float>> scores;
scores.reserve(unsorted_scores_flat.size());
for (int i = 0; i < unsorted_scores_flat.size(); ++i) {
scores.push_back(std::pair<int, float>({i, unsorted_scores_flat(i)}));
}
std::sort(scores.begin(), scores.end(),
[](const std::pair<int, float>& left,
const std::pair<int, float>& right) {
return left.second > right.second;
});
scores.resize(how_many_labels);
Tensor sorted_indices(tensorflow::DT_INT32, {how_many_labels});
Tensor sorted_scores(tensorflow::DT_FLOAT, {how_many_labels});
for (int i = 0; i < scores.size(); ++i) {
sorted_indices.flat<int>()(i) = scores[i].first;
sorted_scores.flat<float>()(i) = scores[i].second;
}
*out_indices = sorted_indices;
*out_scores = sorted_scores;
}
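// Illustrative sketch of GetTopLabels: if outputs[0] holds scores
// {0.1, 0.7, 0.2} and how_many_labels is 2, out_indices ends up as {1, 2}
// and out_scores as {0.7, 0.2}, sorted by descending score.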
} // namespace
int main(int argc, char* argv[]) {
string wav = "";
string graph = "";
string labels = "";
string input_name = "wav_data";
string output_name = "labels_softmax";
int32 how_many_labels = 3;
std::vector<Flag> flag_list = {
Flag("wav", &wav, "audio file to be identified"),
Flag("graph", &graph, "model to be executed"),
Flag("labels", &labels, "path to file containing labels"),
Flag("input_name", &input_name, "name of input node in model"),
Flag("output_name", &output_name, "name of output node in model"),
Flag("how_many_labels", &how_many_labels, "number of results to show"),
};
string usage = tensorflow::Flags::Usage(argv[0], flag_list);
const bool parse_result = tensorflow::Flags::Parse(&argc, argv, flag_list);
if (!parse_result) {
LOG(ERROR) << usage;
return -1;
}
// We need to call this to set up global state for TensorFlow.
tensorflow::port::InitMain(argv[0], &argc, &argv);
if (argc > 1) {
LOG(ERROR) << "Unknown argument " << argv[1] << "\n" << usage;
return -1;
}
// First we load and initialize the model.
std::unique_ptr<tensorflow::Session> session;
Status load_graph_status = LoadGraph(graph, &session);
if (!load_graph_status.ok()) {
LOG(ERROR) << load_graph_status;
return -1;
}
std::vector<string> labels_list;
Status read_labels_status = ReadLabelsFile(labels, &labels_list);
if (!read_labels_status.ok()) {
LOG(ERROR) << read_labels_status;
return -1;
}
string wav_string;
Status read_wav_status = tensorflow::ReadFileToString(
tensorflow::Env::Default(), wav, &wav_string);
if (!read_wav_status.ok()) {
LOG(ERROR) << read_wav_status;
return -1;
}
Tensor wav_tensor(tensorflow::DT_STRING, tensorflow::TensorShape({}));
wav_tensor.scalar<tstring>()() = wav_string;
// Actually run the audio through the model.
std::vector<Tensor> outputs;
Status run_status =
session->Run({{input_name, wav_tensor}}, {output_name}, {}, &outputs);
if (!run_status.ok()) {
LOG(ERROR) << "Running model failed: " << run_status;
return -1;
}
Tensor indices;
Tensor scores;
GetTopLabels(outputs, how_many_labels, &indices, &scores);
tensorflow::TTypes<float>::Flat scores_flat = scores.flat<float>();
tensorflow::TTypes<int32>::Flat indices_flat = indices.flat<int32>();
for (int pos = 0; pos < how_many_labels; ++pos) {
const int label_index = indices_flat(pos);
const float score = scores_flat(pos);
LOG(INFO) << labels_list[label_index] << " (" << label_index
<< "): " << score;
}
return 0;
}
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/speech_commands/label_wav.cc
|
C++
|
apache-2.0
| 6,362
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Runs a trained audio graph against a WAVE file and reports the results.
The model, labels and .wav file specified in the arguments will be loaded, and
then the predictions from running the model against the audio data will be
printed to the console. This is a useful script for sanity checking trained
models, and as an example of how to use an audio model from Python.
Here's an example of running it:
python tensorflow/examples/speech_commands/label_wav.py \
--graph=/tmp/my_frozen_graph.pb \
--labels=/tmp/speech_commands_train/conv_labels.txt \
--wav=/tmp/speech_dataset/left/a5d485dc_nohash_0.wav
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tensorflow as tf
FLAGS = None
def load_graph(filename):
"""Unpersists graph from file as default graph."""
with tf.io.gfile.GFile(filename, 'rb') as f:
graph_def = tf.compat.v1.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
def load_labels(filename):
"""Read in labels, one label per line."""
return [line.rstrip() for line in tf.io.gfile.GFile(filename)]
def run_graph(wav_data, labels, input_layer_name, output_layer_name,
num_top_predictions):
"""Runs the audio data through the graph and prints predictions."""
with tf.compat.v1.Session() as sess:
# Feed the audio data as input to the graph.
    # predictions will contain a two-dimensional array, where one
    # dimension represents the input audio clip count, and the other has
    # predictions per class
softmax_tensor = sess.graph.get_tensor_by_name(output_layer_name)
predictions, = sess.run(softmax_tensor, {input_layer_name: wav_data})
# Sort to show labels in order of confidence
top_k = predictions.argsort()[-num_top_predictions:][::-1]
for node_id in top_k:
human_string = labels[node_id]
score = predictions[node_id]
print('%s (score = %.5f)' % (human_string, score))
return 0
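# Note on the top-k selection in run_graph above: argsort sorts ascending, so
# taking the last num_top_predictions entries and reversing them yields the
# indices of the highest scores. E.g. for predictions [0.1, 0.7, 0.2] and k=2:
# argsort -> [0, 2, 1], [-2:] -> [2, 1], [::-1] -> [1, 2].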
def label_wav(wav, labels, graph, input_name, output_name, how_many_labels):
"""Loads the model and labels, and runs the inference to print predictions."""
if not wav or not tf.io.gfile.exists(wav):
raise ValueError('Audio file does not exist at {0}'.format(wav))
if not labels or not tf.io.gfile.exists(labels):
raise ValueError('Labels file does not exist at {0}'.format(labels))
if not graph or not tf.io.gfile.exists(graph):
raise ValueError('Graph file does not exist at {0}'.format(graph))
labels_list = load_labels(labels)
  # load graph, which is imported into the default graph
load_graph(graph)
with open(wav, 'rb') as wav_file:
wav_data = wav_file.read()
run_graph(wav_data, labels_list, input_name, output_name, how_many_labels)
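# A hedged illustration (added, not part of the original script): the same
# entry point can be driven programmatically instead of via CLI flags, using
# the paths from the module docstring example above.
#   label_wav('/tmp/speech_dataset/left/a5d485dc_nohash_0.wav',
#             '/tmp/speech_commands_train/conv_labels.txt',
#             '/tmp/my_frozen_graph.pb',
#             'wav_data:0', 'labels_softmax:0', 3)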
def main(_):
"""Entry point for script, converts flags to arguments."""
label_wav(FLAGS.wav, FLAGS.labels, FLAGS.graph, FLAGS.input_name,
FLAGS.output_name, FLAGS.how_many_labels)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--wav', type=str, default='', help='Audio file to be identified.')
parser.add_argument(
'--graph', type=str, default='', help='Model to use for identification.')
parser.add_argument(
'--labels', type=str, default='', help='Path to file containing labels.')
parser.add_argument(
'--input_name',
type=str,
default='wav_data:0',
help='Name of WAVE data input node in model.')
parser.add_argument(
'--output_name',
type=str,
default='labels_softmax:0',
help='Name of node outputting a prediction in the model.')
parser.add_argument(
'--how_many_labels',
type=int,
default=3,
help='Number of results to show.')
FLAGS, unparsed = parser.parse_known_args()
tf.compat.v1.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/speech_commands/label_wav.py
|
Python
|
apache-2.0
| 4,614
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Runs a trained audio graph against WAVE files and reports the results.
The model, labels and .wav files specified in the arguments will be loaded, and
then the predictions from running the model against the audio data will be
printed to the console. This is a useful script for sanity checking trained
models, and as an example of how to use an audio model from Python.
Here's an example of running it:
python tensorflow/examples/speech_commands/label_wav_dir.py \
--graph=/tmp/my_frozen_graph.pb \
--labels=/tmp/speech_commands_train/conv_labels.txt \
--wav_dir=/tmp/speech_dataset/left
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import glob
import sys
import tensorflow as tf
FLAGS = None
def load_graph(filename):
"""Unpersists graph from file as default graph."""
with tf.io.gfile.GFile(filename, 'rb') as f:
graph_def = tf.compat.v1.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
def load_labels(filename):
"""Read in labels, one label per line."""
return [line.rstrip() for line in tf.io.gfile.GFile(filename)]
def run_graph(wav_dir, labels, input_layer_name, output_layer_name,
num_top_predictions):
"""Runs the audio data through the graph and prints predictions."""
with tf.compat.v1.Session() as sess:
# Feed the audio data as input to the graph.
    # predictions will contain a two-dimensional array, where one
    # dimension represents the number of audio clips in the batch, and the
    # other has predictions per class
for wav_path in glob.glob(wav_dir + '/*.wav'):
if not wav_path or not tf.io.gfile.exists(wav_path):
raise ValueError('Audio file does not exist at {0}'.format(wav_path))
with open(wav_path, 'rb') as wav_file:
wav_data = wav_file.read()
softmax_tensor = sess.graph.get_tensor_by_name(output_layer_name)
predictions, = sess.run(softmax_tensor, {input_layer_name: wav_data})
# Sort to show labels in order of confidence
print('\n%s' % (wav_path.split('/')[-1]))
top_k = predictions.argsort()[-num_top_predictions:][::-1]
for node_id in top_k:
human_string = labels[node_id]
score = predictions[node_id]
print('%s (score = %.5f)' % (human_string, score))
return 0
def label_wav(wav_dir, labels, graph, input_name, output_name, how_many_labels):
"""Loads the model and labels, and runs the inference to print predictions."""
if not labels or not tf.io.gfile.exists(labels):
raise ValueError('Labels file does not exist at {0}'.format(labels))
if not graph or not tf.io.gfile.exists(graph):
raise ValueError('Graph file does not exist at {0}'.format(graph))
labels_list = load_labels(labels)
  # load graph, which is imported into the default graph
load_graph(graph)
run_graph(wav_dir, labels_list, input_name, output_name, how_many_labels)
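# A hedged illustration (added, not part of the original script): called
# programmatically with the docstring's example paths, every .wav under the
# directory is labeled in turn.
#   label_wav('/tmp/speech_dataset/left',
#             '/tmp/speech_commands_train/conv_labels.txt',
#             '/tmp/my_frozen_graph.pb',
#             'wav_data:0', 'labels_softmax:0', 3)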
def main(_):
"""Entry point for script, converts flags to arguments."""
label_wav(FLAGS.wav_dir, FLAGS.labels, FLAGS.graph, FLAGS.input_name,
FLAGS.output_name, FLAGS.how_many_labels)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
      '--wav_dir', type=str, default='',
      help='Directory of audio files to be identified.')
parser.add_argument(
'--graph', type=str, default='', help='Model to use for identification.')
parser.add_argument(
'--labels', type=str, default='', help='Path to file containing labels.')
parser.add_argument(
'--input_name',
type=str,
default='wav_data:0',
help='Name of WAVE data input node in model.')
parser.add_argument(
'--output_name',
type=str,
default='labels_softmax:0',
help='Name of node outputting a prediction in the model.')
parser.add_argument(
'--how_many_labels',
type=int,
default=3,
help='Number of results to show.')
FLAGS, unparsed = parser.parse_known_args()
tf.compat.v1.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/speech_commands/label_wav_dir.py
|
Python
|
apache-2.0
| 4,773
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for WAVE file labeling tool."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow.examples.speech_commands import label_wav
from tensorflow.python.platform import test
class LabelWavTest(test.TestCase):
def _getWavData(self):
with self.cached_session():
sample_data = tf.zeros([1000, 2])
wav_encoder = tf.audio.encode_wav(sample_data, 16000)
wav_data = self.evaluate(wav_encoder)
return wav_data
def _saveTestWavFile(self, filename, wav_data):
with open(filename, "wb") as f:
f.write(wav_data)
def testLabelWav(self):
tmp_dir = self.get_temp_dir()
wav_data = self._getWavData()
wav_filename = os.path.join(tmp_dir, "wav_file.wav")
self._saveTestWavFile(wav_filename, wav_data)
input_name = "test_input"
output_name = "test_output"
graph_filename = os.path.join(tmp_dir, "test_graph.pb")
with tf.compat.v1.Session() as sess:
tf.compat.v1.placeholder(tf.string, name=input_name)
tf.zeros([1, 3], name=output_name)
with open(graph_filename, "wb") as f:
f.write(sess.graph.as_graph_def().SerializeToString())
labels_filename = os.path.join(tmp_dir, "test_labels.txt")
with open(labels_filename, "w") as f:
f.write("a\nb\nc\n")
label_wav.label_wav(wav_filename, labels_filename, graph_filename,
input_name + ":0", output_name + ":0", 3)
if __name__ == "__main__":
test.main()
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/speech_commands/label_wav_test.py
|
Python
|
apache-2.0
| 2,233
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model definitions for simple speech recognition.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
def _next_power_of_two(x):
"""Calculates the smallest enclosing power of two for an input.
Args:
x: Positive float or integer number.
Returns:
Next largest power of two integer.
"""
return 1 if x == 0 else 2**(int(x) - 1).bit_length()
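# Worked example (added): for the common 30ms window at 16kHz the window is
# 480 samples, (480 - 1).bit_length() == 9, and the function returns
# 2**9 == 512, the next power of two that encloses the window.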
def prepare_model_settings(label_count, sample_rate, clip_duration_ms,
window_size_ms, window_stride_ms, feature_bin_count,
preprocess):
"""Calculates common settings needed for all models.
Args:
label_count: How many classes are to be recognized.
sample_rate: Number of audio samples per second.
clip_duration_ms: Length of each audio clip to be analyzed.
window_size_ms: Duration of frequency analysis window.
window_stride_ms: How far to move in time between frequency windows.
feature_bin_count: Number of frequency bins to use for analysis.
preprocess: How the spectrogram is processed to produce features.
Returns:
Dictionary containing common settings.
Raises:
ValueError: If the preprocessing mode isn't recognized.
"""
desired_samples = int(sample_rate * clip_duration_ms / 1000)
window_size_samples = int(sample_rate * window_size_ms / 1000)
window_stride_samples = int(sample_rate * window_stride_ms / 1000)
length_minus_window = (desired_samples - window_size_samples)
if length_minus_window < 0:
spectrogram_length = 0
else:
spectrogram_length = 1 + int(length_minus_window / window_stride_samples)
if preprocess == 'average':
fft_bin_count = 1 + (_next_power_of_two(window_size_samples) / 2)
average_window_width = int(math.floor(fft_bin_count / feature_bin_count))
fingerprint_width = int(math.ceil(fft_bin_count / average_window_width))
elif preprocess == 'mfcc':
average_window_width = -1
fingerprint_width = feature_bin_count
elif preprocess == 'micro':
average_window_width = -1
fingerprint_width = feature_bin_count
else:
raise ValueError('Unknown preprocess mode "%s" (should be "mfcc",'
' "average", or "micro")' % (preprocess))
fingerprint_size = fingerprint_width * spectrogram_length
return {
'desired_samples': desired_samples,
'window_size_samples': window_size_samples,
'window_stride_samples': window_stride_samples,
'spectrogram_length': spectrogram_length,
'fingerprint_width': fingerprint_width,
'fingerprint_size': fingerprint_size,
'label_count': label_count,
'sample_rate': sample_rate,
'preprocess': preprocess,
'average_window_width': average_window_width,
}
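# Worked example (added; numbers are illustrative): with the 'micro' settings
# of a 1000ms clip at 16kHz, a 30ms window and a 20ms stride, this computes
# desired_samples = 16000, window_size_samples = 480, window_stride_samples =
# 320, spectrogram_length = 1 + (16000 - 480) // 320 = 49, and with
# feature_bin_count = 40 a fingerprint_size of 40 * 49 = 1960 elements.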
def create_model(fingerprint_input, model_settings, model_architecture,
is_training, runtime_settings=None):
"""Builds a model of the requested architecture compatible with the settings.
There are many possible ways of deriving predictions from a spectrogram
input, so this function provides an abstract interface for creating different
kinds of models in a black-box way. You need to pass in a TensorFlow node as
the 'fingerprint' input, and this should output a batch of 1D features that
describe the audio. Typically this will be derived from a spectrogram that's
been run through an MFCC, but in theory it can be any feature vector of the
size specified in model_settings['fingerprint_size'].
  The function will build the graph it needs in the current TensorFlow graph,
  and return the TensorFlow output that will contain the 'logits' input to the
  softmax prediction process. If the training flag is on, it will also return a
  placeholder node that can be used to control the dropout amount.
See the implementations below for the possible model architectures that can be
requested.
Args:
fingerprint_input: TensorFlow node that will output audio feature vectors.
model_settings: Dictionary of information about the model.
model_architecture: String specifying which kind of model to create.
is_training: Whether the model is going to be used for training.
runtime_settings: Dictionary of information about the runtime.
Returns:
TensorFlow node outputting logits results, and optionally a dropout
placeholder.
Raises:
Exception: If the architecture type isn't recognized.
"""
if model_architecture == 'single_fc':
return create_single_fc_model(fingerprint_input, model_settings,
is_training)
elif model_architecture == 'conv':
return create_conv_model(fingerprint_input, model_settings, is_training)
elif model_architecture == 'low_latency_conv':
return create_low_latency_conv_model(fingerprint_input, model_settings,
is_training)
elif model_architecture == 'low_latency_svdf':
return create_low_latency_svdf_model(fingerprint_input, model_settings,
is_training, runtime_settings)
elif model_architecture == 'tiny_conv':
return create_tiny_conv_model(fingerprint_input, model_settings,
is_training)
elif model_architecture == 'tiny_embedding_conv':
return create_tiny_embedding_conv_model(fingerprint_input, model_settings,
is_training)
else:
raise Exception('model_architecture argument "' + model_architecture +
'" not recognized, should be one of "single_fc", "conv",' +
' "low_latency_conv, "low_latency_svdf",' +
' "tiny_conv", or "tiny_embedding_conv"')
def load_variables_from_checkpoint(sess, start_checkpoint):
"""Utility function to centralize checkpoint restoration.
Args:
sess: TensorFlow session.
start_checkpoint: Path to saved checkpoint on disk.
"""
saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables())
saver.restore(sess, start_checkpoint)
def create_single_fc_model(fingerprint_input, model_settings, is_training):
"""Builds a model with a single hidden fully-connected layer.
This is a very simple model with just one matmul and bias layer. As you'd
expect, it doesn't produce very accurate results, but it is very fast and
simple, so it's useful for sanity testing.
Here's the layout of the graph:
(fingerprint_input)
v
[MatMul]<-(weights)
v
[BiasAdd]<-(bias)
v
Args:
fingerprint_input: TensorFlow node that will output audio feature vectors.
model_settings: Dictionary of information about the model.
is_training: Whether the model is going to be used for training.
Returns:
TensorFlow node outputting logits results, and optionally a dropout
placeholder.
"""
if is_training:
dropout_rate = tf.compat.v1.placeholder(tf.float32, name='dropout_rate')
fingerprint_size = model_settings['fingerprint_size']
label_count = model_settings['label_count']
weights = tf.compat.v1.get_variable(
name='weights',
initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.001),
shape=[fingerprint_size, label_count])
bias = tf.compat.v1.get_variable(name='bias',
initializer=tf.compat.v1.zeros_initializer,
shape=[label_count])
logits = tf.matmul(fingerprint_input, weights) + bias
if is_training:
return logits, dropout_rate
else:
return logits
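# Rough size check (added; numbers are illustrative): with a 98-slice, 40-bin
# MFCC fingerprint (fingerprint_size = 3920) and 12 output labels, this single
# layer alone holds 3920 * 12 = 47040 weights plus 12 biases.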
def create_conv_model(fingerprint_input, model_settings, is_training):
"""Builds a standard convolutional model.
This is roughly the network labeled as 'cnn-trad-fpool3' in the
'Convolutional Neural Networks for Small-footprint Keyword Spotting' paper:
http://www.isca-speech.org/archive/interspeech_2015/papers/i15_1478.pdf
Here's the layout of the graph:
(fingerprint_input)
v
[Conv2D]<-(weights)
v
[BiasAdd]<-(bias)
v
[Relu]
v
[MaxPool]
v
[Conv2D]<-(weights)
v
[BiasAdd]<-(bias)
v
[Relu]
v
[MaxPool]
v
[MatMul]<-(weights)
v
[BiasAdd]<-(bias)
v
This produces fairly good quality results, but can involve a large number of
weight parameters and computations. For a cheaper alternative from the same
paper with slightly less accuracy, see 'low_latency_conv' below.
During training, dropout nodes are introduced after each relu, controlled by a
placeholder.
Args:
fingerprint_input: TensorFlow node that will output audio feature vectors.
model_settings: Dictionary of information about the model.
is_training: Whether the model is going to be used for training.
Returns:
TensorFlow node outputting logits results, and optionally a dropout
placeholder.
"""
if is_training:
dropout_rate = tf.compat.v1.placeholder(tf.float32, name='dropout_rate')
input_frequency_size = model_settings['fingerprint_width']
input_time_size = model_settings['spectrogram_length']
fingerprint_4d = tf.reshape(fingerprint_input,
[-1, input_time_size, input_frequency_size, 1])
first_filter_width = 8
first_filter_height = 20
first_filter_count = 64
first_weights = tf.compat.v1.get_variable(
name='first_weights',
initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.01),
shape=[first_filter_height, first_filter_width, 1, first_filter_count])
first_bias = tf.compat.v1.get_variable(
name='first_bias',
initializer=tf.compat.v1.zeros_initializer,
shape=[first_filter_count])
first_conv = tf.nn.conv2d(input=fingerprint_4d,
filters=first_weights,
strides=[1, 1, 1, 1],
padding='SAME') + first_bias
first_relu = tf.nn.relu(first_conv)
if is_training:
first_dropout = tf.nn.dropout(first_relu, rate=dropout_rate)
else:
first_dropout = first_relu
max_pool = tf.nn.max_pool2d(input=first_dropout,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
second_filter_width = 4
second_filter_height = 10
second_filter_count = 64
second_weights = tf.compat.v1.get_variable(
name='second_weights',
initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.01),
shape=[
second_filter_height, second_filter_width, first_filter_count,
second_filter_count
])
second_bias = tf.compat.v1.get_variable(
name='second_bias',
initializer=tf.compat.v1.zeros_initializer,
shape=[second_filter_count])
second_conv = tf.nn.conv2d(input=max_pool,
filters=second_weights,
strides=[1, 1, 1, 1],
padding='SAME') + second_bias
second_relu = tf.nn.relu(second_conv)
if is_training:
second_dropout = tf.nn.dropout(second_relu, rate=dropout_rate)
else:
second_dropout = second_relu
second_conv_shape = second_dropout.get_shape()
second_conv_output_width = second_conv_shape[2]
second_conv_output_height = second_conv_shape[1]
second_conv_element_count = int(
second_conv_output_width * second_conv_output_height *
second_filter_count)
flattened_second_conv = tf.reshape(second_dropout,
[-1, second_conv_element_count])
label_count = model_settings['label_count']
final_fc_weights = tf.compat.v1.get_variable(
name='final_fc_weights',
initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.01),
shape=[second_conv_element_count, label_count])
final_fc_bias = tf.compat.v1.get_variable(
name='final_fc_bias',
initializer=tf.compat.v1.zeros_initializer,
shape=[label_count])
final_fc = tf.matmul(flattened_second_conv, final_fc_weights) + final_fc_bias
if is_training:
return final_fc, dropout_rate
else:
return final_fc
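# Rough size check (added; numbers assume the illustrative MFCC settings
# above): the first conv holds 20*8*1*64 = 10240 weights, the second
# 10*4*64*64 = 163840, and the final fully-connected layer dominates with
# 49*20*64 * 12 = 752640 weights, which is why the docstring warns about the
# parameter count of this model.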
def create_low_latency_conv_model(fingerprint_input, model_settings,
is_training):
"""Builds a convolutional model with low compute requirements.
This is roughly the network labeled as 'cnn-one-fstride4' in the
'Convolutional Neural Networks for Small-footprint Keyword Spotting' paper:
http://www.isca-speech.org/archive/interspeech_2015/papers/i15_1478.pdf
Here's the layout of the graph:
(fingerprint_input)
v
[Conv2D]<-(weights)
v
[BiasAdd]<-(bias)
v
[Relu]
v
[MatMul]<-(weights)
v
[BiasAdd]<-(bias)
v
[MatMul]<-(weights)
v
[BiasAdd]<-(bias)
v
[MatMul]<-(weights)
v
[BiasAdd]<-(bias)
v
This produces slightly lower quality results than the 'conv' model, but needs
fewer weight parameters and computations.
During training, dropout nodes are introduced after the relu, controlled by a
placeholder.
Args:
fingerprint_input: TensorFlow node that will output audio feature vectors.
model_settings: Dictionary of information about the model.
is_training: Whether the model is going to be used for training.
Returns:
TensorFlow node outputting logits results, and optionally a dropout
placeholder.
"""
if is_training:
dropout_rate = tf.compat.v1.placeholder(tf.float32, name='dropout_rate')
input_frequency_size = model_settings['fingerprint_width']
input_time_size = model_settings['spectrogram_length']
fingerprint_4d = tf.reshape(fingerprint_input,
[-1, input_time_size, input_frequency_size, 1])
first_filter_width = 8
first_filter_height = input_time_size
first_filter_count = 186
first_filter_stride_x = 1
first_filter_stride_y = 1
first_weights = tf.compat.v1.get_variable(
name='first_weights',
initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.01),
shape=[first_filter_height, first_filter_width, 1, first_filter_count])
first_bias = tf.compat.v1.get_variable(
name='first_bias',
initializer=tf.compat.v1.zeros_initializer,
shape=[first_filter_count])
first_conv = tf.nn.conv2d(
input=fingerprint_4d,
filters=first_weights,
strides=[1, first_filter_stride_y, first_filter_stride_x, 1],
padding='VALID') + first_bias
first_relu = tf.nn.relu(first_conv)
if is_training:
first_dropout = tf.nn.dropout(first_relu, rate=dropout_rate)
else:
first_dropout = first_relu
first_conv_output_width = math.floor(
(input_frequency_size - first_filter_width + first_filter_stride_x) /
first_filter_stride_x)
first_conv_output_height = math.floor(
(input_time_size - first_filter_height + first_filter_stride_y) /
first_filter_stride_y)
first_conv_element_count = int(
first_conv_output_width * first_conv_output_height * first_filter_count)
flattened_first_conv = tf.reshape(first_dropout,
[-1, first_conv_element_count])
first_fc_output_channels = 128
first_fc_weights = tf.compat.v1.get_variable(
name='first_fc_weights',
initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.01),
shape=[first_conv_element_count, first_fc_output_channels])
first_fc_bias = tf.compat.v1.get_variable(
name='first_fc_bias',
initializer=tf.compat.v1.zeros_initializer,
shape=[first_fc_output_channels])
first_fc = tf.matmul(flattened_first_conv, first_fc_weights) + first_fc_bias
if is_training:
second_fc_input = tf.nn.dropout(first_fc, rate=dropout_rate)
else:
second_fc_input = first_fc
second_fc_output_channels = 128
second_fc_weights = tf.compat.v1.get_variable(
name='second_fc_weights',
initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.01),
shape=[first_fc_output_channels, second_fc_output_channels])
second_fc_bias = tf.compat.v1.get_variable(
name='second_fc_bias',
initializer=tf.compat.v1.zeros_initializer,
shape=[second_fc_output_channels])
second_fc = tf.matmul(second_fc_input, second_fc_weights) + second_fc_bias
if is_training:
final_fc_input = tf.nn.dropout(second_fc, rate=dropout_rate)
else:
final_fc_input = second_fc
label_count = model_settings['label_count']
final_fc_weights = tf.compat.v1.get_variable(
name='final_fc_weights',
initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.01),
shape=[second_fc_output_channels, label_count])
final_fc_bias = tf.compat.v1.get_variable(
name='final_fc_bias',
initializer=tf.compat.v1.zeros_initializer,
shape=[label_count])
final_fc = tf.matmul(final_fc_input, final_fc_weights) + final_fc_bias
if is_training:
return final_fc, dropout_rate
else:
return final_fc
def create_low_latency_svdf_model(fingerprint_input, model_settings,
is_training, runtime_settings):
"""Builds an SVDF model with low compute requirements.
  This is based on the topology presented in the 'Compressing Deep Neural
Networks using a Rank-Constrained Topology' paper:
https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/43813.pdf
Here's the layout of the graph:
(fingerprint_input)
v
[SVDF]<-(weights)
v
[BiasAdd]<-(bias)
v
[Relu]
v
[MatMul]<-(weights)
v
[BiasAdd]<-(bias)
v
[MatMul]<-(weights)
v
[BiasAdd]<-(bias)
v
[MatMul]<-(weights)
v
[BiasAdd]<-(bias)
v
This model produces lower recognition accuracy than the 'conv' model above,
  but requires fewer weight parameters and significantly fewer computations.
During training, dropout nodes are introduced after the relu, controlled by a
placeholder.
Args:
fingerprint_input: TensorFlow node that will output audio feature vectors.
The node is expected to produce a 2D Tensor of shape:
[batch, model_settings['fingerprint_width'] *
model_settings['spectrogram_length']]
with the features corresponding to the same time slot arranged contiguously,
and the oldest slot at index [:, 0], and newest at [:, -1].
model_settings: Dictionary of information about the model.
is_training: Whether the model is going to be used for training.
runtime_settings: Dictionary of information about the runtime.
Returns:
TensorFlow node outputting logits results, and optionally a dropout
placeholder.
Raises:
ValueError: If the inputs tensor is incorrectly shaped.
"""
if is_training:
dropout_rate = tf.compat.v1.placeholder(tf.float32, name='dropout_rate')
input_frequency_size = model_settings['fingerprint_width']
input_time_size = model_settings['spectrogram_length']
# Validation.
input_shape = fingerprint_input.get_shape()
if len(input_shape) != 2:
raise ValueError('Inputs to `SVDF` should have rank == 2.')
if input_shape[-1].value is None:
raise ValueError('The last dimension of the input to `SVDF` '
'should be defined. Found `None`.')
if input_shape[-1].value % input_frequency_size != 0:
raise ValueError('The last dimension of the input to `SVDF` = {0} must be '
'a multiple of the frame size = {1}'.format(
                         input_shape[-1].value, input_frequency_size))
# Set number of units (i.e. nodes) and rank.
rank = 2
num_units = 1280
# Number of filters: pairs of feature and time filters.
num_filters = rank * num_units
# Create the runtime memory: [num_filters, batch, input_time_size]
batch = 1
memory = tf.compat.v1.get_variable(
initializer=tf.compat.v1.zeros_initializer,
shape=[num_filters, batch, input_time_size],
trainable=False,
name='runtime-memory')
first_time_flag = tf.compat.v1.get_variable(
name='first_time_flag', dtype=tf.int32, initializer=1)
# Determine the number of new frames in the input, such that we only operate
# on those. For training we do not use the memory, and thus use all frames
# provided in the input.
# new_fingerprint_input: [batch, num_new_frames*input_frequency_size]
if is_training:
num_new_frames = input_time_size
else:
window_stride_ms = int(model_settings['window_stride_samples'] * 1000 /
model_settings['sample_rate'])
num_new_frames = tf.cond(
pred=tf.equal(first_time_flag, 1),
true_fn=lambda: input_time_size,
false_fn=lambda: int(runtime_settings['clip_stride_ms'] / window_stride_ms)) # pylint:disable=line-too-long
first_time_flag = 0
new_fingerprint_input = fingerprint_input[
:, -num_new_frames*input_frequency_size:]
# Expand to add input channels dimension.
new_fingerprint_input = tf.expand_dims(new_fingerprint_input, 2)
# Create the frequency filters.
weights_frequency = tf.compat.v1.get_variable(
name='weights_frequency',
initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.01),
shape=[input_frequency_size, num_filters])
# Expand to add input channels dimensions.
# weights_frequency: [input_frequency_size, 1, num_filters]
weights_frequency = tf.expand_dims(weights_frequency, 1)
# Convolve the 1D feature filters sliding over the time dimension.
# activations_time: [batch, num_new_frames, num_filters]
activations_time = tf.nn.conv1d(input=new_fingerprint_input,
filters=weights_frequency,
stride=input_frequency_size,
padding='VALID')
# Rearrange such that we can perform the batched matmul.
# activations_time: [num_filters, batch, num_new_frames]
activations_time = tf.transpose(a=activations_time, perm=[2, 0, 1])
# Runtime memory optimization.
if not is_training:
# We need to drop the activations corresponding to the oldest frames, and
# then add those corresponding to the new frames.
new_memory = memory[:, :, num_new_frames:]
new_memory = tf.concat([new_memory, activations_time], 2)
tf.compat.v1.assign(memory, new_memory)
activations_time = new_memory
# Create the time filters.
weights_time = tf.compat.v1.get_variable(
name='weights_time',
initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.01),
shape=[num_filters, input_time_size])
# Apply the time filter on the outputs of the feature filters.
# weights_time: [num_filters, input_time_size, 1]
# outputs: [num_filters, batch, 1]
weights_time = tf.expand_dims(weights_time, 2)
outputs = tf.matmul(activations_time, weights_time)
# Split num_units and rank into separate dimensions (the remaining
  # dimension is input_shape[0], i.e. the batch size). This also squeezes
# the last dimension, since it's not used.
# [num_filters, batch, 1] => [num_units, rank, batch]
outputs = tf.reshape(outputs, [num_units, rank, -1])
# Sum the rank outputs per unit => [num_units, batch].
units_output = tf.reduce_sum(input_tensor=outputs, axis=1)
# Transpose to shape [batch, num_units]
units_output = tf.transpose(a=units_output)
  # Apply the bias.
bias = tf.compat.v1.get_variable(name='bias',
initializer=tf.compat.v1.zeros_initializer,
shape=[num_units])
first_bias = tf.nn.bias_add(units_output, bias)
# Relu.
first_relu = tf.nn.relu(first_bias)
if is_training:
first_dropout = tf.nn.dropout(first_relu, rate=dropout_rate)
else:
first_dropout = first_relu
first_fc_output_channels = 256
first_fc_weights = tf.compat.v1.get_variable(
name='first_fc_weights',
initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.01),
shape=[num_units, first_fc_output_channels])
first_fc_bias = tf.compat.v1.get_variable(
name='first_fc_bias',
initializer=tf.compat.v1.zeros_initializer,
shape=[first_fc_output_channels])
first_fc = tf.matmul(first_dropout, first_fc_weights) + first_fc_bias
if is_training:
second_fc_input = tf.nn.dropout(first_fc, rate=dropout_rate)
else:
second_fc_input = first_fc
second_fc_output_channels = 256
second_fc_weights = tf.compat.v1.get_variable(
name='second_fc_weights',
initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.01),
shape=[first_fc_output_channels, second_fc_output_channels])
second_fc_bias = tf.compat.v1.get_variable(
name='second_fc_bias',
initializer=tf.compat.v1.zeros_initializer,
shape=[second_fc_output_channels])
second_fc = tf.matmul(second_fc_input, second_fc_weights) + second_fc_bias
if is_training:
final_fc_input = tf.nn.dropout(second_fc, rate=dropout_rate)
else:
final_fc_input = second_fc
label_count = model_settings['label_count']
final_fc_weights = tf.compat.v1.get_variable(
name='final_fc_weights',
initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.01),
shape=[second_fc_output_channels, label_count])
final_fc_bias = tf.compat.v1.get_variable(
name='final_fc_bias',
initializer=tf.compat.v1.zeros_initializer,
shape=[label_count])
final_fc = tf.matmul(final_fc_input, final_fc_weights) + final_fc_bias
if is_training:
return final_fc, dropout_rate
else:
return final_fc
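# Shape note (added): with rank = 2 and num_units = 1280 the SVDF keeps
# num_filters = 2560 feature/time filter pairs, and at inference time the
# [2560, 1, input_time_size] runtime memory lets each call convolve only the
# newly arrived frames rather than the whole window.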
def create_tiny_conv_model(fingerprint_input, model_settings, is_training):
"""Builds a convolutional model aimed at microcontrollers.
Devices like DSPs and microcontrollers can have very small amounts of
memory and limited processing power. This model is designed to use less
than 20KB of working RAM, and fit within 32KB of read-only (flash) memory.
Here's the layout of the graph:
(fingerprint_input)
v
[Conv2D]<-(weights)
v
[BiasAdd]<-(bias)
v
[Relu]
v
[MatMul]<-(weights)
v
[BiasAdd]<-(bias)
v
This doesn't produce particularly accurate results, but it's designed to be
used as the first stage of a pipeline, running on a low-energy piece of
  hardware that can always be on, and then waking higher-power chips when a
possible utterance has been found, so that more accurate analysis can be done.
During training, a dropout node is introduced after the relu, controlled by a
placeholder.
Args:
fingerprint_input: TensorFlow node that will output audio feature vectors.
model_settings: Dictionary of information about the model.
is_training: Whether the model is going to be used for training.
Returns:
TensorFlow node outputting logits results, and optionally a dropout
placeholder.
"""
if is_training:
dropout_rate = tf.compat.v1.placeholder(tf.float32, name='dropout_rate')
input_frequency_size = model_settings['fingerprint_width']
input_time_size = model_settings['spectrogram_length']
fingerprint_4d = tf.reshape(fingerprint_input,
[-1, input_time_size, input_frequency_size, 1])
first_filter_width = 8
first_filter_height = 10
first_filter_count = 8
first_weights = tf.compat.v1.get_variable(
name='first_weights',
initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.01),
shape=[first_filter_height, first_filter_width, 1, first_filter_count])
first_bias = tf.compat.v1.get_variable(
name='first_bias',
initializer=tf.compat.v1.zeros_initializer,
shape=[first_filter_count])
first_conv_stride_x = 2
first_conv_stride_y = 2
first_conv = tf.nn.conv2d(
input=fingerprint_4d, filters=first_weights,
strides=[1, first_conv_stride_y, first_conv_stride_x, 1],
padding='SAME') + first_bias
first_relu = tf.nn.relu(first_conv)
if is_training:
first_dropout = tf.nn.dropout(first_relu, rate=dropout_rate)
else:
first_dropout = first_relu
first_dropout_shape = first_dropout.get_shape()
first_dropout_output_width = first_dropout_shape[2]
first_dropout_output_height = first_dropout_shape[1]
first_dropout_element_count = int(
first_dropout_output_width * first_dropout_output_height *
first_filter_count)
flattened_first_dropout = tf.reshape(first_dropout,
[-1, first_dropout_element_count])
label_count = model_settings['label_count']
final_fc_weights = tf.compat.v1.get_variable(
name='final_fc_weights',
initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.01),
shape=[first_dropout_element_count, label_count])
final_fc_bias = tf.compat.v1.get_variable(
name='final_fc_bias',
initializer=tf.compat.v1.zeros_initializer,
shape=[label_count])
final_fc = (
tf.matmul(flattened_first_dropout, final_fc_weights) + final_fc_bias)
if is_training:
return final_fc, dropout_rate
else:
return final_fc
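# Rough size check (added; assumes a four-label 'micro' configuration): the
# 10x8 conv at stride 2 over a 49x40 fingerprint yields a 25x20x8 feature map,
# so the model holds roughly 10*8*8 = 640 conv weights plus 25*20*8 * 4 =
# 16000 fully-connected weights, small enough once quantized to 8 bits to fit
# the flash budget described in the docstring above.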
def create_tiny_embedding_conv_model(fingerprint_input, model_settings,
is_training):
"""Builds a convolutional model aimed at microcontrollers.
Devices like DSPs and microcontrollers can have very small amounts of
memory and limited processing power. This model is designed to use less
than 20KB of working RAM, and fit within 32KB of read-only (flash) memory.
Here's the layout of the graph:
(fingerprint_input)
v
[Conv2D]<-(weights)
v
[BiasAdd]<-(bias)
v
[Relu]
v
[Conv2D]<-(weights)
v
[BiasAdd]<-(bias)
v
[Relu]
v
[Conv2D]<-(weights)
v
[BiasAdd]<-(bias)
v
[Relu]
v
[MatMul]<-(weights)
v
[BiasAdd]<-(bias)
v
This doesn't produce particularly accurate results, but it's designed to be
used as the first stage of a pipeline, running on a low-energy piece of
  hardware that can always be on, and then waking higher-power chips when a
possible utterance has been found, so that more accurate analysis can be done.
During training, a dropout node is introduced after the relu, controlled by a
placeholder.
Args:
fingerprint_input: TensorFlow node that will output audio feature vectors.
model_settings: Dictionary of information about the model.
is_training: Whether the model is going to be used for training.
Returns:
TensorFlow node outputting logits results, and optionally a dropout
placeholder.
"""
if is_training:
dropout_rate = tf.compat.v1.placeholder(tf.float32, name='dropout_rate')
input_frequency_size = model_settings['fingerprint_width']
input_time_size = model_settings['spectrogram_length']
fingerprint_4d = tf.reshape(fingerprint_input,
[-1, input_time_size, input_frequency_size, 1])
first_filter_width = 8
first_filter_height = 10
first_filter_count = 8
first_weights = tf.compat.v1.get_variable(
name='first_weights',
initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.01),
shape=[first_filter_height, first_filter_width, 1, first_filter_count])
first_bias = tf.compat.v1.get_variable(
name='first_bias',
initializer=tf.compat.v1.zeros_initializer,
shape=[first_filter_count])
first_conv_stride_x = 2
first_conv_stride_y = 2
first_conv = tf.nn.conv2d(
input=fingerprint_4d, filters=first_weights,
strides=[1, first_conv_stride_y, first_conv_stride_x, 1],
padding='SAME') + first_bias
first_relu = tf.nn.relu(first_conv)
if is_training:
first_dropout = tf.nn.dropout(first_relu, rate=dropout_rate)
else:
first_dropout = first_relu
second_filter_width = 8
second_filter_height = 10
second_filter_count = 8
second_weights = tf.compat.v1.get_variable(
name='second_weights',
initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.01),
shape=[
second_filter_height, second_filter_width, first_filter_count,
second_filter_count
])
second_bias = tf.compat.v1.get_variable(
name='second_bias',
initializer=tf.compat.v1.zeros_initializer,
shape=[second_filter_count])
second_conv_stride_x = 8
second_conv_stride_y = 8
second_conv = tf.nn.conv2d(
input=first_dropout, filters=second_weights,
strides=[1, second_conv_stride_y, second_conv_stride_x, 1],
padding='SAME') + second_bias
second_relu = tf.nn.relu(second_conv)
if is_training:
second_dropout = tf.nn.dropout(second_relu, rate=dropout_rate)
else:
second_dropout = second_relu
second_dropout_shape = second_dropout.get_shape()
second_dropout_output_width = second_dropout_shape[2]
second_dropout_output_height = second_dropout_shape[1]
second_dropout_element_count = int(second_dropout_output_width *
second_dropout_output_height *
second_filter_count)
flattened_second_dropout = tf.reshape(second_dropout,
[-1, second_dropout_element_count])
label_count = model_settings['label_count']
final_fc_weights = tf.compat.v1.get_variable(
name='final_fc_weights',
initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.01),
shape=[second_dropout_element_count, label_count])
final_fc_bias = tf.compat.v1.get_variable(
name='final_fc_bias',
initializer=tf.compat.v1.zeros_initializer,
shape=[label_count])
final_fc = (
tf.matmul(flattened_second_dropout, final_fc_weights) + final_fc_bias)
if is_training:
return final_fc, dropout_rate
else:
return final_fc
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/speech_commands/models.py
|
Python
|
apache-2.0
| 34,618
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for speech commands models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.examples.speech_commands import models
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class ModelsTest(test.TestCase):
def _modelSettings(self):
return models.prepare_model_settings(
label_count=10,
sample_rate=16000,
clip_duration_ms=1000,
window_size_ms=20,
window_stride_ms=10,
feature_bin_count=40,
preprocess="mfcc")
def testPrepareModelSettings(self):
self.assertIsNotNone(
models.prepare_model_settings(
label_count=10,
sample_rate=16000,
clip_duration_ms=1000,
window_size_ms=20,
window_stride_ms=10,
feature_bin_count=40,
preprocess="mfcc"))
@test_util.run_deprecated_v1
def testCreateModelConvTraining(self):
model_settings = self._modelSettings()
with self.cached_session() as sess:
fingerprint_input = tf.zeros([1, model_settings["fingerprint_size"]])
logits, dropout_rate = models.create_model(
fingerprint_input, model_settings, "conv", True)
self.assertIsNotNone(logits)
self.assertIsNotNone(dropout_rate)
self.assertIsNotNone(sess.graph.get_tensor_by_name(logits.name))
self.assertIsNotNone(sess.graph.get_tensor_by_name(dropout_rate.name))
@test_util.run_deprecated_v1
def testCreateModelConvInference(self):
model_settings = self._modelSettings()
with self.cached_session() as sess:
fingerprint_input = tf.zeros([1, model_settings["fingerprint_size"]])
logits = models.create_model(fingerprint_input, model_settings, "conv",
False)
self.assertIsNotNone(logits)
self.assertIsNotNone(sess.graph.get_tensor_by_name(logits.name))
@test_util.run_deprecated_v1
def testCreateModelLowLatencyConvTraining(self):
model_settings = self._modelSettings()
with self.cached_session() as sess:
fingerprint_input = tf.zeros([1, model_settings["fingerprint_size"]])
logits, dropout_rate = models.create_model(
fingerprint_input, model_settings, "low_latency_conv", True)
self.assertIsNotNone(logits)
self.assertIsNotNone(dropout_rate)
self.assertIsNotNone(sess.graph.get_tensor_by_name(logits.name))
self.assertIsNotNone(sess.graph.get_tensor_by_name(dropout_rate.name))
@test_util.run_deprecated_v1
def testCreateModelFullyConnectedTraining(self):
model_settings = self._modelSettings()
with self.cached_session() as sess:
fingerprint_input = tf.zeros([1, model_settings["fingerprint_size"]])
logits, dropout_rate = models.create_model(
fingerprint_input, model_settings, "single_fc", True)
self.assertIsNotNone(logits)
self.assertIsNotNone(dropout_rate)
self.assertIsNotNone(sess.graph.get_tensor_by_name(logits.name))
self.assertIsNotNone(sess.graph.get_tensor_by_name(dropout_rate.name))
def testCreateModelBadArchitecture(self):
model_settings = self._modelSettings()
with self.cached_session():
fingerprint_input = tf.zeros([1, model_settings["fingerprint_size"]])
with self.assertRaises(Exception) as e:
models.create_model(fingerprint_input, model_settings,
"bad_architecture", True)
self.assertIn("not recognized", str(e.exception))
@test_util.run_deprecated_v1
def testCreateModelTinyConvTraining(self):
model_settings = self._modelSettings()
with self.cached_session() as sess:
fingerprint_input = tf.zeros([1, model_settings["fingerprint_size"]])
logits, dropout_rate = models.create_model(
fingerprint_input, model_settings, "tiny_conv", True)
self.assertIsNotNone(logits)
self.assertIsNotNone(dropout_rate)
self.assertIsNotNone(sess.graph.get_tensor_by_name(logits.name))
self.assertIsNotNone(sess.graph.get_tensor_by_name(dropout_rate.name))
if __name__ == "__main__":
test.main()
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/speech_commands/models_test.py
|
Python
|
apache-2.0
| 4,866
|
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/examples/speech_commands/recognize_commands.h"
namespace tensorflow {
RecognizeCommands::RecognizeCommands(const std::vector<string>& labels,
int32 average_window_duration_ms,
float detection_threshold,
int32 suppression_ms, int32 minimum_count)
: labels_(labels),
average_window_duration_ms_(average_window_duration_ms),
detection_threshold_(detection_threshold),
suppression_ms_(suppression_ms),
minimum_count_(minimum_count) {
labels_count_ = labels.size();
previous_top_label_ = "_silence_";
previous_top_label_time_ = std::numeric_limits<int64>::min();
}
Status RecognizeCommands::ProcessLatestResults(const Tensor& latest_results,
const int64 current_time_ms,
string* found_command,
float* score,
bool* is_new_command) {
if (latest_results.NumElements() != labels_count_) {
return errors::InvalidArgument(
"The results for recognition should contain ", labels_count_,
" elements, but there are ", latest_results.NumElements());
}
if ((!previous_results_.empty()) &&
(current_time_ms < previous_results_.front().first)) {
return errors::InvalidArgument(
"Results must be fed in increasing time order, but received a "
"timestamp of ",
current_time_ms, " that was earlier than the previous one of ",
previous_results_.front().first);
}
  // Add the latest results to the back of the queue.
previous_results_.push_back({current_time_ms, latest_results});
// Prune any earlier results that are too old for the averaging window.
const int64 time_limit = current_time_ms - average_window_duration_ms_;
while (previous_results_.front().first < time_limit) {
previous_results_.pop_front();
}
// If there are too few results, assume the result will be unreliable and
// bail.
const int64 how_many_results = previous_results_.size();
const int64 earliest_time = previous_results_.front().first;
const int64 samples_duration = current_time_ms - earliest_time;
if ((how_many_results < minimum_count_) ||
(samples_duration < (average_window_duration_ms_ / 4))) {
*found_command = previous_top_label_;
*score = 0.0f;
*is_new_command = false;
return Status::OK();
}
// Calculate the average score across all the results in the window.
std::vector<float> average_scores(labels_count_);
for (const auto& previous_result : previous_results_) {
const Tensor& scores_tensor = previous_result.second;
auto scores_flat = scores_tensor.flat<float>();
for (int i = 0; i < scores_flat.size(); ++i) {
average_scores[i] += scores_flat(i) / how_many_results;
}
}
// Sort the averaged results in descending score order.
std::vector<std::pair<int, float>> sorted_average_scores;
sorted_average_scores.reserve(labels_count_);
for (int i = 0; i < labels_count_; ++i) {
sorted_average_scores.push_back(
std::pair<int, float>({i, average_scores[i]}));
}
std::sort(sorted_average_scores.begin(), sorted_average_scores.end(),
[](const std::pair<int, float>& left,
const std::pair<int, float>& right) {
return left.second > right.second;
});
// See if the latest top score is enough to trigger a detection.
const int current_top_index = sorted_average_scores[0].first;
const string current_top_label = labels_[current_top_index];
const float current_top_score = sorted_average_scores[0].second;
// If we've recently had another label trigger, assume one that occurs too
// soon afterwards is a bad result.
int64 time_since_last_top;
if ((previous_top_label_ == "_silence_") ||
(previous_top_label_time_ == std::numeric_limits<int64>::min())) {
time_since_last_top = std::numeric_limits<int64>::max();
} else {
time_since_last_top = current_time_ms - previous_top_label_time_;
}
if ((current_top_score > detection_threshold_) &&
(current_top_label != previous_top_label_) &&
(time_since_last_top > suppression_ms_)) {
previous_top_label_ = current_top_label;
previous_top_label_time_ = current_time_ms;
*is_new_command = true;
} else {
*is_new_command = false;
}
*found_command = current_top_label;
*score = current_top_score;
return Status::OK();
}
} // namespace tensorflow
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/speech_commands/recognize_commands.cc
|
C++
|
apache-2.0
| 5,282
|
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_EXAMPLES_SPEECH_COMMANDS_RECOGNIZE_COMMANDS_H_
#define TENSORFLOW_EXAMPLES_SPEECH_COMMANDS_RECOGNIZE_COMMANDS_H_
#include <deque>
#include <unordered_set>
#include <vector>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
// This class is designed to apply a very primitive decoding model on top of the
// instantaneous results from running an audio recognition model on a single
// window of samples. It applies smoothing over time so that noisy individual
// label scores are averaged, increasing the confidence that apparent matches
// are real.
// To use it, you should create a class object with the configuration you
// want, and then feed results from running a TensorFlow model into the
// processing method. The timestamp for each subsequent call should be
// increasing from the previous, since the class is designed to process a stream
// of data over time.
class RecognizeCommands {
public:
// labels should be a list of the strings associated with each one-hot score.
// The window duration controls the smoothing. Longer durations will give a
// higher confidence that the results are correct, but may miss some commands.
// The detection threshold has a similar effect, with high values increasing
// the precision at the cost of recall. The minimum count controls how many
// results need to be in the averaging window before it's seen as a reliable
// average. This prevents erroneous results when the averaging window is
  // initially being populated, for example. The suppression argument disables
// further recognitions for a set time after one has been triggered, which can
// help reduce spurious recognitions.
explicit RecognizeCommands(const std::vector<string>& labels,
int32 average_window_duration_ms = 1000,
float detection_threshold = 0.2,
int32 suppression_ms = 500,
int32 minimum_count = 3);
// Call this with the results of running a model on sample data.
Status ProcessLatestResults(const Tensor& latest_results,
const int64 current_time_ms,
string* found_command, float* score,
bool* is_new_command);
private:
// Configuration
std::vector<string> labels_;
int32 average_window_duration_ms_;
float detection_threshold_;
int32 suppression_ms_;
int32 minimum_count_;
// Working variables
std::deque<std::pair<int64, Tensor>> previous_results_;
string previous_top_label_;
int64 labels_count_;
int64 previous_top_label_time_;
};
} // namespace tensorflow
#endif // TENSORFLOW_EXAMPLES_SPEECH_COMMANDS_RECOGNIZE_COMMANDS_H_
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/speech_commands/recognize_commands.h
|
C++
|
apache-2.0
| 3,472
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stream accuracy recognize commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
class RecognizeResult(object):
"""Save recognition result temporarily.
Attributes:
    founded_command: A string indicating the word just found. Default value
      is '_silence_'.
    score: A float representing the confidence of the found word. Default
      value is zero.
    is_new_command: A boolean indicating whether the found command differs
      from the last one. Default value is False.
"""
def __init__(self):
self._founded_command = "_silence_"
self._score = 0
self._is_new_command = False
@property
def founded_command(self):
return self._founded_command
@founded_command.setter
def founded_command(self, value):
self._founded_command = value
@property
def score(self):
return self._score
@score.setter
def score(self, value):
self._score = value
@property
def is_new_command(self):
return self._is_new_command
@is_new_command.setter
def is_new_command(self, value):
self._is_new_command = value
class RecognizeCommands(object):
"""Smooth the inference results by using average window.
Maintain a slide window over the audio stream, which adds new result(a pair of
the 1.confidences of all classes and 2.the start timestamp of input audio
clip) directly the inference produces one and removes the most previous one
and other abnormal values. Then it smooth the results in the window to get
the most reliable command in this period.
Attributes:
_label: A list containing commands at corresponding lines.
_average_window_duration: The length of average window.
_detection_threshold: A confidence threshold for filtering out unreliable
command.
_suppression_ms: Milliseconds every two reliable founded commands should
apart.
_minimum_count: An integer count indicating the minimum results the average
window should cover.
_previous_results: A deque to store previous results.
_label_count: The length of label list.
_previous_top_label: Last founded command. Initial value is '_silence_'.
_previous_top_time: The timestamp of _previous results. Default is -np.inf.
"""
def __init__(self, labels, average_window_duration_ms, detection_threshold,
suppression_ms, minimum_count):
"""Init the RecognizeCommands with parameters used for smoothing."""
# Configuration
self._labels = labels
self._average_window_duration_ms = average_window_duration_ms
self._detection_threshold = detection_threshold
self._suppression_ms = suppression_ms
self._minimum_count = minimum_count
# Working Variable
self._previous_results = collections.deque()
self._label_count = len(labels)
self._previous_top_label = "_silence_"
self._previous_top_time = -np.inf
def process_latest_result(self, latest_results, current_time_ms,
recognize_element):
"""Smoothing the results in average window when a new result is added in.
Receive a new result from inference and put the founded command into
a RecognizeResult instance after the smoothing procedure.
Args:
latest_results: A list containing the confidences of all labels.
current_time_ms: The start timestamp of the input audio clip.
recognize_element: An instance of RecognizeResult to store founded
command, its scores and if it is a new command.
Raises:
      ValueError: The length of this result from inference doesn't match the
        label count.
      ValueError: The timestamp of this result is earlier than the previous
        one in the averaging window.
"""
if latest_results.shape[0] != self._label_count:
raise ValueError("The results for recognition should contain {} "
"elements, but there are {} produced".format(
self._label_count, latest_results.shape[0]))
    if (len(self._previous_results) != 0 and
current_time_ms < self._previous_results[0][0]):
raise ValueError("Results must be fed in increasing time order, "
"but receive a timestamp of {}, which was earlier "
"than the previous one of {}".format(
current_time_ms, self._previous_results[0][0]))
    # Add the latest result to the back of the deque.
self._previous_results.append([current_time_ms, latest_results])
# Prune any earlier results that are too old for the averaging window.
time_limit = current_time_ms - self._average_window_duration_ms
while time_limit > self._previous_results[0][0]:
self._previous_results.popleft()
    # If there are too few results, assume the result is unreliable and bail.
    how_many_results = len(self._previous_results)
earliest_time = self._previous_results[0][0]
sample_duration = current_time_ms - earliest_time
if (how_many_results < self._minimum_count or
sample_duration < self._average_window_duration_ms / 4):
recognize_element.founded_command = self._previous_top_label
recognize_element.score = 0.0
recognize_element.is_new_command = False
return
# Calculate the average score across all the results in the window.
average_scores = np.zeros(self._label_count)
for item in self._previous_results:
score = item[1]
for i in range(score.size):
average_scores[i] += score[i] / how_many_results
# Sort the averaged results in descending score order.
sorted_averaged_index_score = []
for i in range(self._label_count):
sorted_averaged_index_score.append([i, average_scores[i]])
sorted_averaged_index_score = sorted(
sorted_averaged_index_score, key=lambda p: p[1], reverse=True)
    # Use information about the previous detection to decide whether this
    # result is a new command.
current_top_index = sorted_averaged_index_score[0][0]
current_top_label = self._labels[current_top_index]
current_top_score = sorted_averaged_index_score[0][1]
time_since_last_top = 0
if (self._previous_top_label == "_silence_" or
self._previous_top_time == -np.inf):
time_since_last_top = np.inf
else:
time_since_last_top = current_time_ms - self._previous_top_time
if (current_top_score > self._detection_threshold and
current_top_label != self._previous_top_label and
time_since_last_top > self._suppression_ms):
self._previous_top_label = current_top_label
self._previous_top_time = current_time_ms
recognize_element.is_new_command = True
else:
recognize_element.is_new_command = False
recognize_element.founded_command = current_top_label
recognize_element.score = current_top_score
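
# A minimal usage sketch (not part of the original file): drive the smoother
# with synthetic softmax scores and watch for a detection. It reuses the
# RecognizeResult holder defined earlier in this module; the labels, scores,
# and timing parameters below are illustrative only.
if __name__ == '__main__':
  demo_labels = ['_silence_', '_unknown_', 'yes', 'no']
  demo_recognizer = RecognizeCommands(
      labels=demo_labels,
      average_window_duration_ms=500,
      detection_threshold=0.7,
      suppression_ms=500,
      minimum_count=4)
  demo_result = RecognizeResult()
  fake_scores = np.array([0.0, 0.0, 1.0, 0.0])  # a confident 'yes'
  # Feed one result every 100ms of (synthetic) audio; a new command should be
  # reported once the averaging window holds enough results.
  for step_ms in range(0, 1000, 100):
    demo_recognizer.process_latest_result(fake_scores, step_ms, demo_result)
    if demo_result.is_new_command:
      print('Detected %s (score %.2f) at %dms' %
            (demo_result.founded_command, demo_result.score, step_ms))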
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/speech_commands/recognize_commands.py
|
Python
|
apache-2.0
| 7,567
|
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/examples/speech_commands/recognize_commands.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(RecognizeCommandsTest, Basic) {
RecognizeCommands recognize_commands({"_silence_", "a", "b"});
Tensor results(DT_FLOAT, {3});
test::FillValues<float>(&results, {1.0f, 0.0f, 0.0f});
string found_command;
float score;
bool is_new_command;
TF_EXPECT_OK(recognize_commands.ProcessLatestResults(
results, 0, &found_command, &score, &is_new_command));
}
TEST(RecognizeCommandsTest, FindCommands) {
RecognizeCommands recognize_commands({"_silence_", "a", "b"}, 1000, 0.2f);
Tensor results(DT_FLOAT, {3});
test::FillValues<float>(&results, {0.0f, 1.0f, 0.0f});
bool has_found_new_command = false;
string new_command;
for (int i = 0; i < 10; ++i) {
string found_command;
float score;
bool is_new_command;
int64 current_time_ms = 0 + (i * 100);
TF_EXPECT_OK(recognize_commands.ProcessLatestResults(
results, current_time_ms, &found_command, &score, &is_new_command));
if (is_new_command) {
EXPECT_FALSE(has_found_new_command);
has_found_new_command = true;
new_command = found_command;
}
}
EXPECT_TRUE(has_found_new_command);
EXPECT_EQ("a", new_command);
test::FillValues<float>(&results, {0.0f, 0.0f, 1.0f});
has_found_new_command = false;
new_command = "";
for (int i = 0; i < 10; ++i) {
string found_command;
float score;
bool is_new_command;
int64 current_time_ms = 1000 + (i * 100);
TF_EXPECT_OK(recognize_commands.ProcessLatestResults(
results, current_time_ms, &found_command, &score, &is_new_command));
if (is_new_command) {
EXPECT_FALSE(has_found_new_command);
has_found_new_command = true;
new_command = found_command;
}
}
EXPECT_TRUE(has_found_new_command);
EXPECT_EQ("b", new_command);
}
TEST(RecognizeCommandsTest, BadInputLength) {
RecognizeCommands recognize_commands({"_silence_", "a", "b"}, 1000, 0.2f);
Tensor bad_results(DT_FLOAT, {2});
test::FillValues<float>(&bad_results, {1.0f, 0.0f});
string found_command;
float score;
bool is_new_command;
EXPECT_FALSE(recognize_commands
.ProcessLatestResults(bad_results, 0, &found_command, &score,
&is_new_command)
.ok());
}
TEST(RecognizeCommandsTest, BadInputTimes) {
RecognizeCommands recognize_commands({"_silence_", "a", "b"}, 1000, 0.2f);
Tensor results(DT_FLOAT, {3});
test::FillValues<float>(&results, {1.0f, 0.0f, 0.0f});
string found_command;
float score;
bool is_new_command;
TF_EXPECT_OK(recognize_commands.ProcessLatestResults(
results, 100, &found_command, &score, &is_new_command));
EXPECT_FALSE(recognize_commands
.ProcessLatestResults(results, 0, &found_command, &score,
&is_new_command)
.ok());
}
} // namespace tensorflow
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/speech_commands/recognize_commands_test.cc
|
C++
|
apache-2.0
| 3,824
|
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
/*
Tool to create accuracy statistics from running an audio recognition model on a
continuous stream of samples.
This is designed to be an environment for running experiments on new models and
settings to understand the effects they will have in a real application. You
need to supply it with a long audio file containing sounds you want to recognize
and a text file listing the labels of each sound along with the time they occur.
With this information, and a frozen model, the tool will process the audio
stream, apply the model, and keep track of how many mistakes and successes the
model achieved.
The matched percentage is the number of sounds that were correctly classified,
as a percentage of the total number of sounds listed in the ground truth file.
A correct classification is when the right label is chosen within a short time
of the expected ground truth, where the time tolerance is controlled by the
'time_tolerance_ms' command line flag.
The wrong percentage is how many sounds triggered a detection (the classifier
figured out it wasn't silence or background noise), but the detected class was
wrong. This is also a percentage of the total number of ground truth sounds.
The false positive percentage is how many sounds were detected when there was
only silence or background noise. This is also expressed as a percentage of the
total number of ground truth sounds, though since it can be large it may go
above 100%.
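As a concrete illustration (hypothetical numbers): with 10 ground truth
sounds, 7 correct detections, 2 detections with the wrong label, and 3
detections during silence, the tool reports 70% matched, 20% wrong, and 30%
false positives.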
The easiest way to get an audio file and labels to test with is by using the
'generate_streaming_test_wav' script. This will synthesize a test file with
randomly placed sounds and background noise, and output a text file with the
ground truth.
If you want to test natural data, you need to use a .wav with the same sample
rate as your model (often 16,000 samples per second), and note down where the
sounds occur in time. Save this information out as a comma-separated text file,
where the first column is the label and the second is the time in seconds from
the start of the file that it occurs.
Here's an example of how to run the tool:
bazel run tensorflow/examples/speech_commands:test_streaming_accuracy -- \
--wav=/tmp/streaming_test_bg.wav \
--graph=/tmp/conv_frozen.pb \
--labels=/tmp/speech_commands_train/conv_labels.txt \
--ground_truth=/tmp/streaming_test_labels.txt --verbose \
--clip_duration_ms=1000 --detection_threshold=0.70 --average_window_ms=500 \
--suppression_ms=500 --time_tolerance_ms=1500
*/
#include <fstream>
#include <iomanip>
#include <unordered_set>
#include <vector>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/wav/wav_io.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/command_line_flags.h"
#include "tensorflow/examples/speech_commands/accuracy_utils.h"
#include "tensorflow/examples/speech_commands/recognize_commands.h"
// These are all common classes it's handy to reference with no namespace.
using tensorflow::Flag;
using tensorflow::Status;
using tensorflow::Tensor;
using tensorflow::int32;
using tensorflow::int64;
using tensorflow::string;
using tensorflow::uint16;
using tensorflow::uint32;
namespace {
// Reads a model graph definition from disk, and creates a session object you
// can use to run it.
Status LoadGraph(const string& graph_file_name,
std::unique_ptr<tensorflow::Session>* session) {
tensorflow::GraphDef graph_def;
Status load_graph_status =
ReadBinaryProto(tensorflow::Env::Default(), graph_file_name, &graph_def);
if (!load_graph_status.ok()) {
return tensorflow::errors::NotFound("Failed to load compute graph at '",
graph_file_name, "'");
}
session->reset(tensorflow::NewSession(tensorflow::SessionOptions()));
Status session_create_status = (*session)->Create(graph_def);
if (!session_create_status.ok()) {
return session_create_status;
}
return Status::OK();
}
// Takes a file name, and loads a list of labels from it, one per line, and
// returns a vector of the strings.
Status ReadLabelsFile(const string& file_name, std::vector<string>* result) {
std::ifstream file(file_name);
if (!file) {
return tensorflow::errors::NotFound("Labels file '", file_name,
"' not found.");
}
result->clear();
string line;
while (std::getline(file, line)) {
result->push_back(line);
}
return Status::OK();
}
} // namespace
int main(int argc, char* argv[]) {
string wav = "";
string graph = "";
string labels = "";
string ground_truth = "";
string input_data_name = "decoded_sample_data:0";
string input_rate_name = "decoded_sample_data:1";
string output_name = "labels_softmax";
int32 clip_duration_ms = 1000;
int32 clip_stride_ms = 30;
int32 average_window_ms = 500;
int32 time_tolerance_ms = 750;
int32 suppression_ms = 1500;
float detection_threshold = 0.7f;
bool verbose = false;
std::vector<Flag> flag_list = {
Flag("wav", &wav, "audio file to be identified"),
Flag("graph", &graph, "model to be executed"),
Flag("labels", &labels, "path to file containing labels"),
Flag("ground_truth", &ground_truth,
"path to file containing correct times and labels of words in the "
"audio as <word>,<timestamp in ms> lines"),
Flag("input_data_name", &input_data_name,
"name of input data node in model"),
Flag("input_rate_name", &input_rate_name,
"name of input sample rate node in model"),
Flag("output_name", &output_name, "name of output node in model"),
Flag("clip_duration_ms", &clip_duration_ms,
"length of recognition window"),
Flag("average_window_ms", &average_window_ms,
"length of window to smooth results over"),
Flag("time_tolerance_ms", &time_tolerance_ms,
"maximum gap allowed between a recognition and ground truth"),
Flag("suppression_ms", &suppression_ms,
"how long to ignore others for after a recognition"),
Flag("clip_stride_ms", &clip_stride_ms, "how often to run recognition"),
Flag("detection_threshold", &detection_threshold,
"what score is required to trigger detection of a word"),
Flag("verbose", &verbose, "whether to log extra debugging information"),
};
string usage = tensorflow::Flags::Usage(argv[0], flag_list);
const bool parse_result = tensorflow::Flags::Parse(&argc, argv, flag_list);
if (!parse_result) {
LOG(ERROR) << usage;
return -1;
}
// We need to call this to set up global state for TensorFlow.
tensorflow::port::InitMain(argv[0], &argc, &argv);
if (argc > 1) {
LOG(ERROR) << "Unknown argument " << argv[1] << "\n" << usage;
return -1;
}
// First we load and initialize the model.
std::unique_ptr<tensorflow::Session> session;
Status load_graph_status = LoadGraph(graph, &session);
if (!load_graph_status.ok()) {
LOG(ERROR) << load_graph_status;
return -1;
}
std::vector<string> labels_list;
Status read_labels_status = ReadLabelsFile(labels, &labels_list);
if (!read_labels_status.ok()) {
LOG(ERROR) << read_labels_status;
return -1;
}
std::vector<std::pair<string, tensorflow::int64>> ground_truth_list;
Status read_ground_truth_status =
tensorflow::ReadGroundTruthFile(ground_truth, &ground_truth_list);
if (!read_ground_truth_status.ok()) {
LOG(ERROR) << read_ground_truth_status;
return -1;
}
string wav_string;
Status read_wav_status = tensorflow::ReadFileToString(
tensorflow::Env::Default(), wav, &wav_string);
if (!read_wav_status.ok()) {
LOG(ERROR) << read_wav_status;
return -1;
}
std::vector<float> audio_data;
uint32 sample_count;
uint16 channel_count;
uint32 sample_rate;
Status decode_wav_status = tensorflow::wav::DecodeLin16WaveAsFloatVector(
wav_string, &audio_data, &sample_count, &channel_count, &sample_rate);
if (!decode_wav_status.ok()) {
LOG(ERROR) << decode_wav_status;
return -1;
}
if (channel_count != 1) {
LOG(ERROR) << "Only mono .wav files can be used, but input has "
<< channel_count << " channels.";
return -1;
}
const int64 clip_duration_samples = (clip_duration_ms * sample_rate) / 1000;
const int64 clip_stride_samples = (clip_stride_ms * sample_rate) / 1000;
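  // For example (assuming the default flags above and a 16 kHz wav):
  // clip_duration_samples = (1000 * 16000) / 1000 = 16000 and
  // clip_stride_samples = (30 * 16000) / 1000 = 480, so each inference
  // window advances by 30ms of audio.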
Tensor audio_data_tensor(tensorflow::DT_FLOAT,
tensorflow::TensorShape({clip_duration_samples, 1}));
Tensor sample_rate_tensor(tensorflow::DT_INT32, tensorflow::TensorShape({}));
sample_rate_tensor.scalar<int32>()() = sample_rate;
tensorflow::RecognizeCommands recognize_commands(
labels_list, average_window_ms, detection_threshold, suppression_ms);
std::vector<std::pair<string, int64>> all_found_words;
tensorflow::StreamingAccuracyStats previous_stats;
const int64 audio_data_end = (sample_count - clip_duration_samples);
for (int64 audio_data_offset = 0; audio_data_offset < audio_data_end;
audio_data_offset += clip_stride_samples) {
const float* input_start = &(audio_data[audio_data_offset]);
const float* input_end = input_start + clip_duration_samples;
std::copy(input_start, input_end, audio_data_tensor.flat<float>().data());
// Actually run the audio through the model.
std::vector<Tensor> outputs;
Status run_status = session->Run({{input_data_name, audio_data_tensor},
{input_rate_name, sample_rate_tensor}},
{output_name}, {}, &outputs);
if (!run_status.ok()) {
LOG(ERROR) << "Running model failed: " << run_status;
return -1;
}
const int64 current_time_ms = (audio_data_offset * 1000) / sample_rate;
string found_command;
float score;
bool is_new_command;
Status recognize_status = recognize_commands.ProcessLatestResults(
outputs[0], current_time_ms, &found_command, &score, &is_new_command);
if (!recognize_status.ok()) {
LOG(ERROR) << "Recognition processing failed: " << recognize_status;
return -1;
}
if (is_new_command && (found_command != "_silence_")) {
all_found_words.push_back({found_command, current_time_ms});
if (verbose) {
tensorflow::StreamingAccuracyStats stats;
tensorflow::CalculateAccuracyStats(ground_truth_list, all_found_words,
current_time_ms, time_tolerance_ms,
&stats);
int32 false_positive_delta = stats.how_many_false_positives -
previous_stats.how_many_false_positives;
int32 correct_delta = stats.how_many_correct_words -
previous_stats.how_many_correct_words;
int32 wrong_delta =
stats.how_many_wrong_words - previous_stats.how_many_wrong_words;
string recognition_state;
if (false_positive_delta == 1) {
recognition_state = " (False Positive)";
} else if (correct_delta == 1) {
recognition_state = " (Correct)";
} else if (wrong_delta == 1) {
recognition_state = " (Wrong)";
} else {
LOG(ERROR) << "Unexpected state in statistics";
}
LOG(INFO) << current_time_ms << "ms: " << found_command << ": " << score
<< recognition_state;
previous_stats = stats;
tensorflow::PrintAccuracyStats(stats);
}
}
}
tensorflow::StreamingAccuracyStats stats;
tensorflow::CalculateAccuracyStats(ground_truth_list, all_found_words, -1,
time_tolerance_ms, &stats);
tensorflow::PrintAccuracyStats(stats);
return 0;
}
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/speech_commands/test_streaming_accuracy.cc
|
C++
|
apache-2.0
| 12,675
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Tool to create accuracy statistics on a continuous stream of samples.
This is designed to be an environment for running experiments on new models and
settings to understand the effects they will have in a real application. You
need to supply it with a long audio file containing sounds you want to recognize
and a text file listing the labels of each sound along with the time they occur.
With this information, and a frozen model, the tool will process the audio
stream, apply the model, and keep track of how many mistakes and successes the
model achieved.
The matched percentage is the number of sounds that were correctly classified,
as a percentage of the total number of sounds listed in the ground truth file.
A correct classification is when the right label is chosen within a short time
of the expected ground truth, where the time tolerance is controlled by the
'time_tolerance_ms' command line flag.
The wrong percentage is how many sounds triggered a detection (the classifier
figured out it wasn't silence or background noise), but the detected class was
wrong. This is also a percentage of the total number of ground truth sounds.
The false positive percentage is how many sounds were detected when there was
only silence or background noise. This is also expressed as a percentage of the
total number of ground truth sounds, though since it can be large it may go
above 100%.
The easiest way to get an audio file and labels to test with is by using the
'generate_streaming_test_wav' script. This will synthesize a test file with
randomly placed sounds and background noise, and output a text file with the
ground truth.
If you want to test natural data, you need to use a .wav with the same sample
rate as your model (often 16,000 samples per second), and note down where the
sounds occur in time. Save this information out as a comma-separated text file,
where the first column is the label and the second is the time in seconds from
the start of the file that it occurs.
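For instance, a hand-labeled ground truth file might contain lines like this
(labels and times are illustrative):

yes,1.26
no,3.50
yes,6.07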
Here's an example of how to run the tool:
bazel run tensorflow/examples/speech_commands:test_streaming_accuracy_py -- \
--wav=/tmp/streaming_test_bg.wav \
--ground-truth=/tmp/streaming_test_labels.txt --verbose \
--model=/tmp/conv_frozen.pb \
--labels=/tmp/speech_commands_train/conv_labels.txt \
--clip_duration_ms=1000 --detection_threshold=0.70 --average_window_ms=500 \
--suppression_ms=500 --time_tolerance_ms=1500
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy
import tensorflow as tf
from accuracy_utils import StreamingAccuracyStats
from recognize_commands import RecognizeCommands
from recognize_commands import RecognizeResult
from tensorflow.python.ops import io_ops
FLAGS = None
def load_graph(model_file):
  """Reads a frozen TensorFlow model file and returns its graph object."""
  graph = tf.Graph()
  with graph.as_default():
    od_graph_def = tf.compat.v1.GraphDef()
    with tf.io.gfile.GFile(model_file, 'rb') as fid:
      serialized_graph = fid.read()
      od_graph_def.ParseFromString(serialized_graph)
      tf.import_graph_def(od_graph_def, name='')
  return graph
def read_label_file(file_name):
"""Load a list of label."""
label_list = []
with open(file_name, 'r') as f:
for line in f:
label_list.append(line.strip())
return label_list
def read_wav_file(filename):
"""Load a wav file and return sample_rate and numpy data of float64 type."""
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
wav_filename_placeholder = tf.compat.v1.placeholder(tf.string, [])
wav_loader = io_ops.read_file(wav_filename_placeholder)
wav_decoder = tf.audio.decode_wav(wav_loader, desired_channels=1)
res = sess.run(wav_decoder, feed_dict={wav_filename_placeholder: filename})
return res.sample_rate, res.audio.flatten()
def main(_):
label_list = read_label_file(FLAGS.labels)
sample_rate, data = read_wav_file(FLAGS.wav)
# Init instance of RecognizeCommands with given parameters.
recognize_commands = RecognizeCommands(
labels=label_list,
average_window_duration_ms=FLAGS.average_window_duration_ms,
detection_threshold=FLAGS.detection_threshold,
suppression_ms=FLAGS.suppression_ms,
minimum_count=4)
# Init instance of StreamingAccuracyStats and load ground truth.
stats = StreamingAccuracyStats()
stats.read_ground_truth_file(FLAGS.ground_truth)
recognize_element = RecognizeResult()
all_found_words = []
data_samples = data.shape[0]
clip_duration_samples = int(FLAGS.clip_duration_ms * sample_rate / 1000)
clip_stride_samples = int(FLAGS.clip_stride_ms * sample_rate / 1000)
audio_data_end = data_samples - clip_duration_samples
# Load model and create a tf session to process audio pieces
recognize_graph = load_graph(FLAGS.model)
with recognize_graph.as_default():
with tf.compat.v1.Session() as sess:
# Get input and output tensor
data_tensor = sess.graph.get_tensor_by_name(FLAGS.input_names[0])
sample_rate_tensor = sess.graph.get_tensor_by_name(FLAGS.input_names[1])
output_softmax_tensor = sess.graph.get_tensor_by_name(FLAGS.output_name)
# Inference along audio stream.
for audio_data_offset in range(0, audio_data_end, clip_stride_samples):
input_start = audio_data_offset
input_end = audio_data_offset + clip_duration_samples
outputs = sess.run(
output_softmax_tensor,
feed_dict={
data_tensor:
numpy.expand_dims(data[input_start:input_end], axis=-1),
sample_rate_tensor:
sample_rate
})
outputs = numpy.squeeze(outputs)
current_time_ms = int(audio_data_offset * 1000 / sample_rate)
try:
recognize_commands.process_latest_result(outputs, current_time_ms,
recognize_element)
except ValueError as e:
          tf.compat.v1.logging.error(
              'Recognition processing failed: {}'.format(e))
return
if (recognize_element.is_new_command and
recognize_element.founded_command != '_silence_'):
all_found_words.append(
[recognize_element.founded_command, current_time_ms])
if FLAGS.verbose:
stats.calculate_accuracy_stats(all_found_words, current_time_ms,
FLAGS.time_tolerance_ms)
try:
recognition_state = stats.delta()
except ValueError as e:
tf.compat.v1.logging.error(
'Statistics delta computing failed: {}'.format(e))
else:
tf.compat.v1.logging.info('{}ms {}:{}{}'.format(
current_time_ms, recognize_element.founded_command,
recognize_element.score, recognition_state))
stats.print_accuracy_stats()
stats.calculate_accuracy_stats(all_found_words, -1, FLAGS.time_tolerance_ms)
stats.print_accuracy_stats()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='test_streaming_accuracy')
parser.add_argument(
'--wav', type=str, default='', help='The wave file path to evaluate.')
parser.add_argument(
'--ground-truth',
type=str,
default='',
help='The ground truth file path corresponding to wav file.')
parser.add_argument(
'--labels',
type=str,
default='',
help='The label file path containing all possible classes.')
parser.add_argument(
'--model', type=str, default='', help='The model used for inference')
parser.add_argument(
'--input-names',
type=str,
nargs='+',
default=['decoded_sample_data:0', 'decoded_sample_data:1'],
help='Input name list involved in model graph.')
parser.add_argument(
'--output-name',
type=str,
default='labels_softmax:0',
help='Output name involved in model graph.')
parser.add_argument(
'--clip-duration-ms',
type=int,
default=1000,
help='Length of each audio clip fed into model.')
parser.add_argument(
'--clip-stride-ms',
type=int,
default=30,
      help='How far to advance in time between successive audio clips.')
parser.add_argument(
'--average_window_duration_ms',
type=int,
default=500,
help='Length of average window used for smoothing results.')
parser.add_argument(
'--detection-threshold',
type=float,
default=0.7,
help='The confidence for filtering unreliable commands')
parser.add_argument(
'--suppression_ms',
type=int,
default=500,
help='The time interval between every two adjacent commands')
parser.add_argument(
'--time-tolerance-ms',
type=int,
default=1500,
help='Time tolerance before and after the timestamp of this audio clip '
'to match ground truth')
parser.add_argument(
'--verbose',
action='store_true',
default=False,
help='Whether to print streaming accuracy on stdout.')
FLAGS, unparsed = parser.parse_known_args()
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
tf.compat.v1.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/speech_commands/test_streaming_accuracy.py
|
Python
|
apache-2.0
| 9,972
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Simple speech recognition to spot a limited number of keywords.
This is a self-contained example script that will train a very basic audio
recognition model in TensorFlow. It downloads the necessary training data and
runs with reasonable defaults to train within a few hours, even using only a CPU.
For more information, please see
https://www.tensorflow.org/tutorials/audio/simple_audio.
It is intended as an introduction to using neural networks for audio
recognition, and is not a full speech recognition system. For more advanced
speech systems, I recommend looking into Kaldi. This network uses a keyword
detection style to spot discrete words from a small vocabulary, consisting of
"yes", "no", "up", "down", "left", "right", "on", "off", "stop", and "go".
To run the training process, use:
bazel run tensorflow/examples/speech_commands:train
This will write out checkpoints to /tmp/speech_commands_train/, and will
download over 1GB of open source training data, so you'll need enough free space
and a good internet connection. The default data is a collection of thousands of
one-second .wav files, each containing one spoken word. This data set is
collected from https://aiyprojects.withgoogle.com/open_speech_recording, please
consider contributing to help improve this and other models!
As training progresses, it will print out its accuracy metrics, which should
rise above 90% by the end. Once it's complete, you can run the freeze script to
get a binary GraphDef that you can easily deploy on mobile applications.
If you want to train on your own data, you'll need to create .wavs with your
recordings, all at a consistent length, and then arrange them into subfolders
organized by label. For example, here's a possible file structure:
my_wavs >
up >
audio_0.wav
audio_1.wav
down >
audio_2.wav
audio_3.wav
other>
audio_4.wav
audio_5.wav
You'll also need to tell the script what labels to look for, using the
`--wanted_words` argument. In this case, 'up,down' might be what you want, and
the audio in the 'other' folder would be used to train an 'unknown' category.
To pull this all together, you'd run:
bazel run tensorflow/examples/speech_commands:train -- \
--data_dir=my_wavs --wanted_words=up,down
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import sys
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import input_data
import models
from tensorflow.python.platform import gfile
FLAGS = None
def main(_):
# Set the verbosity based on flags (default is INFO, so we see all messages)
tf.compat.v1.logging.set_verbosity(FLAGS.verbosity)
# Start a new TensorFlow session.
sess = tf.compat.v1.InteractiveSession()
# Begin by making sure we have the training data we need. If you already have
# training data of your own, use `--data_url= ` on the command line to avoid
# downloading.
model_settings = models.prepare_model_settings(
len(input_data.prepare_words_list(FLAGS.wanted_words.split(','))),
FLAGS.sample_rate, FLAGS.clip_duration_ms, FLAGS.window_size_ms,
FLAGS.window_stride_ms, FLAGS.feature_bin_count, FLAGS.preprocess)
audio_processor = input_data.AudioProcessor(
FLAGS.data_url, FLAGS.data_dir,
FLAGS.silence_percentage, FLAGS.unknown_percentage,
FLAGS.wanted_words.split(','), FLAGS.validation_percentage,
FLAGS.testing_percentage, model_settings, FLAGS.summaries_dir)
fingerprint_size = model_settings['fingerprint_size']
label_count = model_settings['label_count']
time_shift_samples = int((FLAGS.time_shift_ms * FLAGS.sample_rate) / 1000)
# Figure out the learning rates for each training phase. Since it's often
# effective to have high learning rates at the start of training, followed by
# lower levels towards the end, the number of steps and learning rates can be
# specified as comma-separated lists to define the rate at each stage. For
# example --how_many_training_steps=10000,3000 --learning_rate=0.001,0.0001
# will run 13,000 training loops in total, with a rate of 0.001 for the first
# 10,000, and 0.0001 for the final 3,000.
training_steps_list = list(map(int, FLAGS.how_many_training_steps.split(',')))
learning_rates_list = list(map(float, FLAGS.learning_rate.split(',')))
if len(training_steps_list) != len(learning_rates_list):
raise Exception(
'--how_many_training_steps and --learning_rate must be equal length '
'lists, but are %d and %d long instead' % (len(training_steps_list),
len(learning_rates_list)))
input_placeholder = tf.compat.v1.placeholder(
tf.float32, [None, fingerprint_size], name='fingerprint_input')
if FLAGS.quantize:
fingerprint_min, fingerprint_max = input_data.get_features_range(
model_settings)
fingerprint_input = tf.quantization.fake_quant_with_min_max_args(
input_placeholder, fingerprint_min, fingerprint_max)
else:
fingerprint_input = input_placeholder
logits, dropout_rate = models.create_model(
fingerprint_input,
model_settings,
FLAGS.model_architecture,
is_training=True)
# Define loss and optimizer
ground_truth_input = tf.compat.v1.placeholder(
tf.int64, [None], name='groundtruth_input')
# Optionally we can add runtime checks to spot when NaNs or other symptoms of
# numerical errors start occurring during training.
control_dependencies = []
if FLAGS.check_nans:
checks = tf.compat.v1.add_check_numerics_ops()
control_dependencies = [checks]
# Create the back propagation and training evaluation machinery in the graph.
with tf.compat.v1.name_scope('cross_entropy'):
cross_entropy_mean = tf.compat.v1.losses.sparse_softmax_cross_entropy(
labels=ground_truth_input, logits=logits)
if FLAGS.quantize:
try:
tf.contrib.quantize.create_training_graph(quant_delay=0)
except AttributeError as e:
msg = e.args[0]
msg += ('\n\n The --quantize option still requires contrib, which is not '
'part of TensorFlow 2.0. Please install a previous version:'
'\n `pip install tensorflow<=1.15`')
e.args = (msg,)
raise e
with tf.compat.v1.name_scope('train'), tf.control_dependencies(
control_dependencies):
learning_rate_input = tf.compat.v1.placeholder(
tf.float32, [], name='learning_rate_input')
if FLAGS.optimizer == 'gradient_descent':
train_step = tf.compat.v1.train.GradientDescentOptimizer(
learning_rate_input).minimize(cross_entropy_mean)
elif FLAGS.optimizer == 'momentum':
train_step = tf.compat.v1.train.MomentumOptimizer(
learning_rate_input, .9,
use_nesterov=True).minimize(cross_entropy_mean)
else:
raise Exception('Invalid Optimizer')
predicted_indices = tf.argmax(input=logits, axis=1)
correct_prediction = tf.equal(predicted_indices, ground_truth_input)
confusion_matrix = tf.math.confusion_matrix(labels=ground_truth_input,
predictions=predicted_indices,
num_classes=label_count)
evaluation_step = tf.reduce_mean(input_tensor=tf.cast(correct_prediction,
tf.float32))
with tf.compat.v1.get_default_graph().name_scope('eval'):
tf.compat.v1.summary.scalar('cross_entropy', cross_entropy_mean)
tf.compat.v1.summary.scalar('accuracy', evaluation_step)
global_step = tf.compat.v1.train.get_or_create_global_step()
increment_global_step = tf.compat.v1.assign(global_step, global_step + 1)
saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables())
# Merge all the summaries and write them out to /tmp/retrain_logs (by default)
merged_summaries = tf.compat.v1.summary.merge_all(scope='eval')
train_writer = tf.compat.v1.summary.FileWriter(FLAGS.summaries_dir + '/train',
sess.graph)
validation_writer = tf.compat.v1.summary.FileWriter(
FLAGS.summaries_dir + '/validation')
tf.compat.v1.global_variables_initializer().run()
start_step = 1
if FLAGS.start_checkpoint:
models.load_variables_from_checkpoint(sess, FLAGS.start_checkpoint)
start_step = global_step.eval(session=sess)
tf.compat.v1.logging.info('Training from step: %d ', start_step)
# Save graph.pbtxt.
tf.io.write_graph(sess.graph_def, FLAGS.train_dir,
FLAGS.model_architecture + '.pbtxt')
# Save list of words.
with gfile.GFile(
os.path.join(FLAGS.train_dir, FLAGS.model_architecture + '_labels.txt'),
'w') as f:
f.write('\n'.join(audio_processor.words_list))
# Training loop.
training_steps_max = np.sum(training_steps_list)
for training_step in xrange(start_step, training_steps_max + 1):
# Figure out what the current learning rate is.
training_steps_sum = 0
for i in range(len(training_steps_list)):
training_steps_sum += training_steps_list[i]
if training_step <= training_steps_sum:
learning_rate_value = learning_rates_list[i]
break
# Pull the audio samples we'll use for training.
train_fingerprints, train_ground_truth = audio_processor.get_data(
FLAGS.batch_size, 0, model_settings, FLAGS.background_frequency,
FLAGS.background_volume, time_shift_samples, 'training', sess)
# Run the graph with this batch of training data.
train_summary, train_accuracy, cross_entropy_value, _, _ = sess.run(
[
merged_summaries,
evaluation_step,
cross_entropy_mean,
train_step,
increment_global_step,
],
feed_dict={
fingerprint_input: train_fingerprints,
ground_truth_input: train_ground_truth,
learning_rate_input: learning_rate_value,
dropout_rate: 0.5
})
train_writer.add_summary(train_summary, training_step)
tf.compat.v1.logging.debug(
'Step #%d: rate %f, accuracy %.1f%%, cross entropy %f' %
(training_step, learning_rate_value, train_accuracy * 100,
cross_entropy_value))
is_last_step = (training_step == training_steps_max)
if (training_step % FLAGS.eval_step_interval) == 0 or is_last_step:
tf.compat.v1.logging.info(
'Step #%d: rate %f, accuracy %.1f%%, cross entropy %f' %
(training_step, learning_rate_value, train_accuracy * 100,
cross_entropy_value))
set_size = audio_processor.set_size('validation')
total_accuracy = 0
total_conf_matrix = None
for i in xrange(0, set_size, FLAGS.batch_size):
validation_fingerprints, validation_ground_truth = (
audio_processor.get_data(FLAGS.batch_size, i, model_settings, 0.0,
0.0, 0, 'validation', sess))
# Run a validation step and capture training summaries for TensorBoard
# with the `merged` op.
validation_summary, validation_accuracy, conf_matrix = sess.run(
[merged_summaries, evaluation_step, confusion_matrix],
feed_dict={
fingerprint_input: validation_fingerprints,
ground_truth_input: validation_ground_truth,
dropout_rate: 0.0
})
validation_writer.add_summary(validation_summary, training_step)
batch_size = min(FLAGS.batch_size, set_size - i)
total_accuracy += (validation_accuracy * batch_size) / set_size
if total_conf_matrix is None:
total_conf_matrix = conf_matrix
else:
total_conf_matrix += conf_matrix
tf.compat.v1.logging.info('Confusion Matrix:\n %s' % (total_conf_matrix))
tf.compat.v1.logging.info('Step %d: Validation accuracy = %.1f%% (N=%d)' %
(training_step, total_accuracy * 100, set_size))
# Save the model checkpoint periodically.
if (training_step % FLAGS.save_step_interval == 0 or
training_step == training_steps_max):
checkpoint_path = os.path.join(FLAGS.train_dir,
FLAGS.model_architecture + '.ckpt')
tf.compat.v1.logging.info('Saving to "%s-%d"', checkpoint_path,
training_step)
saver.save(sess, checkpoint_path, global_step=training_step)
set_size = audio_processor.set_size('testing')
tf.compat.v1.logging.info('set_size=%d', set_size)
total_accuracy = 0
total_conf_matrix = None
for i in xrange(0, set_size, FLAGS.batch_size):
test_fingerprints, test_ground_truth = audio_processor.get_data(
FLAGS.batch_size, i, model_settings, 0.0, 0.0, 0, 'testing', sess)
test_accuracy, conf_matrix = sess.run(
[evaluation_step, confusion_matrix],
feed_dict={
fingerprint_input: test_fingerprints,
ground_truth_input: test_ground_truth,
dropout_rate: 0.0
})
batch_size = min(FLAGS.batch_size, set_size - i)
total_accuracy += (test_accuracy * batch_size) / set_size
if total_conf_matrix is None:
total_conf_matrix = conf_matrix
else:
total_conf_matrix += conf_matrix
tf.compat.v1.logging.warn('Confusion Matrix:\n %s' % (total_conf_matrix))
tf.compat.v1.logging.warn('Final test accuracy = %.1f%% (N=%d)' %
(total_accuracy * 100, set_size))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--data_url',
type=str,
# pylint: disable=line-too-long
default='https://storage.googleapis.com/download.tensorflow.org/data/speech_commands_v0.02.tar.gz',
# pylint: enable=line-too-long
help='Location of speech training data archive on the web.')
parser.add_argument(
'--data_dir',
type=str,
default='/tmp/speech_dataset/',
help="""\
Where to download the speech training data to.
""")
parser.add_argument(
'--background_volume',
type=float,
default=0.1,
help="""\
How loud the background noise should be, between 0 and 1.
""")
parser.add_argument(
'--background_frequency',
type=float,
default=0.8,
help="""\
How many of the training samples have background noise mixed in.
""")
parser.add_argument(
'--silence_percentage',
type=float,
default=10.0,
help="""\
How much of the training data should be silence.
""")
parser.add_argument(
'--unknown_percentage',
type=float,
default=10.0,
help="""\
How much of the training data should be unknown words.
""")
parser.add_argument(
'--time_shift_ms',
type=float,
default=100.0,
help="""\
Range to randomly shift the training audio by in time.
""")
parser.add_argument(
'--testing_percentage',
type=int,
default=10,
help='What percentage of wavs to use as a test set.')
parser.add_argument(
'--validation_percentage',
type=int,
default=10,
help='What percentage of wavs to use as a validation set.')
parser.add_argument(
'--sample_rate',
type=int,
default=16000,
help='Expected sample rate of the wavs',)
parser.add_argument(
'--clip_duration_ms',
type=int,
default=1000,
help='Expected duration in milliseconds of the wavs',)
parser.add_argument(
'--window_size_ms',
type=float,
default=30.0,
help='How long each spectrogram timeslice is.',)
parser.add_argument(
'--window_stride_ms',
type=float,
default=10.0,
help='How far to move in time between spectrogram timeslices.',
)
parser.add_argument(
'--feature_bin_count',
type=int,
default=40,
help='How many bins to use for the MFCC fingerprint',
)
parser.add_argument(
'--how_many_training_steps',
type=str,
default='15000,3000',
help='How many training loops to run',)
parser.add_argument(
'--eval_step_interval',
type=int,
default=400,
help='How often to evaluate the training results.')
parser.add_argument(
'--learning_rate',
type=str,
default='0.001,0.0001',
help='How large a learning rate to use when training.')
parser.add_argument(
'--batch_size',
type=int,
default=100,
help='How many items to train with at once',)
parser.add_argument(
'--summaries_dir',
type=str,
default='/tmp/retrain_logs',
help='Where to save summary logs for TensorBoard.')
parser.add_argument(
'--wanted_words',
type=str,
default='yes,no,up,down,left,right,on,off,stop,go',
help='Words to use (others will be added to an unknown label)',)
parser.add_argument(
'--train_dir',
type=str,
default='/tmp/speech_commands_train',
help='Directory to write event logs and checkpoint.')
parser.add_argument(
'--save_step_interval',
type=int,
default=100,
help='Save model checkpoint every save_steps.')
parser.add_argument(
'--start_checkpoint',
type=str,
default='',
help='If specified, restore this pretrained model before any training.')
parser.add_argument(
'--model_architecture',
type=str,
default='conv',
help='What model architecture to use')
parser.add_argument(
'--check_nans',
type=bool,
default=False,
help='Whether to check for invalid numbers during processing')
parser.add_argument(
'--quantize',
type=bool,
default=False,
help='Whether to train the model for eight-bit deployment')
parser.add_argument(
'--preprocess',
type=str,
default='mfcc',
help='Spectrogram processing mode. Can be "mfcc", "average", or "micro"')
# Function used to parse --verbosity argument
  def verbosity_arg(value):
    """Parses the verbosity argument.

    Args:
      value: A member of tf.logging.

    Returns:
      The corresponding tf.compat.v1.logging level.

    Raises:
      ArgumentTypeError: Not an expected value.
    """
value = value.upper()
if value == 'DEBUG':
return tf.compat.v1.logging.DEBUG
elif value == 'INFO':
return tf.compat.v1.logging.INFO
elif value == 'WARN':
return tf.compat.v1.logging.WARN
elif value == 'ERROR':
return tf.compat.v1.logging.ERROR
elif value == 'FATAL':
return tf.compat.v1.logging.FATAL
else:
raise argparse.ArgumentTypeError('Not an expected value')
parser.add_argument(
'--verbosity',
type=verbosity_arg,
default=tf.compat.v1.logging.INFO,
help='Log verbosity. Can be "DEBUG", "INFO", "WARN", "ERROR", or "FATAL"')
parser.add_argument(
'--optimizer',
type=str,
default='gradient_descent',
help='Optimizer (gradient_descent or momentum)')
FLAGS, unparsed = parser.parse_known_args()
tf.compat.v1.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/speech_commands/train.py
|
Python
|
apache-2.0
| 19,825
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for data input for speech commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
import tensorflow as tf
from tensorflow.examples.speech_commands import train
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
def requires_contrib(test_method):
try:
_ = tf.contrib
except AttributeError:
test_method = unittest.skip(
'This test requires tf.contrib:\n `pip install tensorflow<=1.15`')(
test_method)
return test_method
# Used to convert a dictionary into an object, for mocking parsed flags.
class DictStruct(object):
def __init__(self, **entries):
self.__dict__.update(entries)
class TrainTest(test.TestCase):
def _getWavData(self):
with self.cached_session():
sample_data = tf.zeros([32000, 2])
wav_encoder = tf.audio.encode_wav(sample_data, 16000)
wav_data = self.evaluate(wav_encoder)
return wav_data
def _saveTestWavFile(self, filename, wav_data):
with open(filename, 'wb') as f:
f.write(wav_data)
def _saveWavFolders(self, root_dir, labels, how_many):
wav_data = self._getWavData()
for label in labels:
dir_name = os.path.join(root_dir, label)
os.mkdir(dir_name)
for i in range(how_many):
file_path = os.path.join(dir_name, 'some_audio_%d.wav' % i)
self._saveTestWavFile(file_path, wav_data)
def _prepareDummyTrainingData(self):
tmp_dir = self.get_temp_dir()
wav_dir = os.path.join(tmp_dir, 'wavs')
os.mkdir(wav_dir)
self._saveWavFolders(wav_dir, ['a', 'b', 'c'], 100)
background_dir = os.path.join(wav_dir, '_background_noise_')
os.mkdir(background_dir)
wav_data = self._getWavData()
for i in range(10):
file_path = os.path.join(background_dir, 'background_audio_%d.wav' % i)
self._saveTestWavFile(file_path, wav_data)
return wav_dir
def _getDefaultFlags(self):
flags = {
'data_url': '',
'data_dir': self._prepareDummyTrainingData(),
'wanted_words': 'a,b,c',
'sample_rate': 16000,
'clip_duration_ms': 1000,
'window_size_ms': 30,
'window_stride_ms': 20,
'feature_bin_count': 40,
'preprocess': 'mfcc',
'silence_percentage': 25,
'unknown_percentage': 25,
'validation_percentage': 10,
'testing_percentage': 10,
'summaries_dir': os.path.join(self.get_temp_dir(), 'summaries'),
'train_dir': os.path.join(self.get_temp_dir(), 'train'),
'time_shift_ms': 100,
'how_many_training_steps': '2',
'learning_rate': '0.01',
'quantize': False,
'model_architecture': 'conv',
'check_nans': False,
'start_checkpoint': '',
'batch_size': 1,
'background_volume': 0.25,
'background_frequency': 0.8,
'eval_step_interval': 1,
'save_step_interval': 1,
'verbosity': tf.compat.v1.logging.INFO,
'optimizer': 'gradient_descent'
}
return DictStruct(**flags)
@test_util.run_deprecated_v1
def testTrain(self):
train.FLAGS = self._getDefaultFlags()
train.main('')
self.assertTrue(
gfile.Exists(
os.path.join(train.FLAGS.train_dir,
train.FLAGS.model_architecture + '.pbtxt')))
self.assertTrue(
gfile.Exists(
os.path.join(train.FLAGS.train_dir,
train.FLAGS.model_architecture + '_labels.txt')))
self.assertTrue(
gfile.Exists(
os.path.join(train.FLAGS.train_dir,
train.FLAGS.model_architecture + '.ckpt-1.meta')))
if __name__ == '__main__':
test.main()
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/speech_commands/train_test.py
|
Python
|
apache-2.0
| 4,503
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Converts WAV audio files into input features for neural networks.
The models used in this example take in two-dimensional spectrograms as the
input to their neural network portions. For testing and porting purposes it's
useful to be able to generate these spectrograms outside of the full model, so
that on-device implementations using their own FFT and streaming code can be
tested against the version used in training, for example. The output is a
C source file, so it can be easily linked into an embedded test application.
To use this, run:
bazel run tensorflow/examples/speech_commands:wav_to_features -- \
--input_wav=my.wav --output_c_file=my_wav_data.c
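
The generated file defines width and height constants plus the feature data
array itself; for the command above it would contain declarations along these
lines (actual values elided):

const int g_my_width = ...;
const int g_my_height = ...;
const float g_my_data[] = { ... };

When --quantize is set, the data array is written as unsigned char values in
the range 0-255 instead of floats.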
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import sys
import tensorflow as tf
import input_data
import models
from tensorflow.python.platform import gfile
FLAGS = None
def wav_to_features(sample_rate, clip_duration_ms, window_size_ms,
window_stride_ms, feature_bin_count, quantize, preprocess,
input_wav, output_c_file):
"""Converts an audio file into its corresponding feature map.
Args:
sample_rate: Expected sample rate of the wavs.
clip_duration_ms: Expected duration in milliseconds of the wavs.
window_size_ms: How long each spectrogram timeslice is.
window_stride_ms: How far to move in time between spectrogram timeslices.
feature_bin_count: How many bins to use for the feature fingerprint.
quantize: Whether to train the model for eight-bit deployment.
preprocess: Spectrogram processing mode; "mfcc", "average" or "micro".
input_wav: Path to the audio WAV file to read.
output_c_file: Where to save the generated C source file.
"""
# Start a new TensorFlow session.
sess = tf.compat.v1.InteractiveSession()
model_settings = models.prepare_model_settings(
0, sample_rate, clip_duration_ms, window_size_ms, window_stride_ms,
feature_bin_count, preprocess)
audio_processor = input_data.AudioProcessor(None, None, 0, 0, '', 0, 0,
model_settings, None)
results = audio_processor.get_features_for_wav(input_wav, model_settings,
sess)
features = results[0]
variable_base = os.path.splitext(os.path.basename(input_wav).lower())[0]
# Save a C source file containing the feature data as an array.
with gfile.GFile(output_c_file, 'w') as f:
f.write('/* File automatically created by\n')
f.write(' * tensorflow/examples/speech_commands/wav_to_features.py \\\n')
f.write(' * --sample_rate=%d \\\n' % sample_rate)
f.write(' * --clip_duration_ms=%d \\\n' % clip_duration_ms)
f.write(' * --window_size_ms=%d \\\n' % window_size_ms)
f.write(' * --window_stride_ms=%d \\\n' % window_stride_ms)
f.write(' * --feature_bin_count=%d \\\n' % feature_bin_count)
if quantize:
f.write(' * --quantize=1 \\\n')
f.write(' * --preprocess="%s" \\\n' % preprocess)
f.write(' * --input_wav="%s" \\\n' % input_wav)
f.write(' * --output_c_file="%s" \\\n' % output_c_file)
f.write(' */\n\n')
f.write('const int g_%s_width = %d;\n' %
(variable_base, model_settings['fingerprint_width']))
f.write('const int g_%s_height = %d;\n' %
(variable_base, model_settings['spectrogram_length']))
if quantize:
features_min, features_max = input_data.get_features_range(model_settings)
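      # Each float feature is linearly rescaled into the 0-255 byte range and
      # clamped. For example (hypothetical range), with features_min = -247.0
      # and features_max = 30.0, a feature value of 0.0 maps to
      # round(255 * 247 / 277) = 227.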
f.write('const unsigned char g_%s_data[] = {' % variable_base)
i = 0
for value in features.flatten():
quantized_value = int(
round(
(255 * (value - features_min)) / (features_max - features_min)))
if quantized_value < 0:
quantized_value = 0
if quantized_value > 255:
quantized_value = 255
if i == 0:
f.write('\n ')
f.write('%d, ' % (quantized_value))
i = (i + 1) % 10
else:
f.write('const float g_%s_data[] = {\n' % variable_base)
i = 0
for value in features.flatten():
if i == 0:
f.write('\n ')
f.write('%f, ' % value)
i = (i + 1) % 10
f.write('\n};\n')
def main(_):
# We want to see all the logging messages.
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
wav_to_features(FLAGS.sample_rate, FLAGS.clip_duration_ms,
FLAGS.window_size_ms, FLAGS.window_stride_ms,
FLAGS.feature_bin_count, FLAGS.quantize, FLAGS.preprocess,
FLAGS.input_wav, FLAGS.output_c_file)
tf.compat.v1.logging.info('Wrote to "%s"' % (FLAGS.output_c_file))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--sample_rate',
type=int,
default=16000,
help='Expected sample rate of the wavs',)
parser.add_argument(
'--clip_duration_ms',
type=int,
default=1000,
help='Expected duration in milliseconds of the wavs',)
parser.add_argument(
'--window_size_ms',
type=float,
default=30.0,
help='How long each spectrogram timeslice is.',)
parser.add_argument(
'--window_stride_ms',
type=float,
default=10.0,
help='How far to move in time between spectrogram timeslices.',
)
parser.add_argument(
'--feature_bin_count',
type=int,
default=40,
help='How many bins to use for the MFCC fingerprint',
)
parser.add_argument(
'--quantize',
type=bool,
default=False,
help='Whether to train the model for eight-bit deployment')
parser.add_argument(
'--preprocess',
type=str,
default='mfcc',
help='Spectrogram processing mode. Can be "mfcc", "average", or "micro"')
parser.add_argument(
'--input_wav',
type=str,
default=None,
help='Path to the audio WAV file to read')
parser.add_argument(
'--output_c_file',
type=str,
default=None,
help='Where to save the generated C source file containing the features')
FLAGS, unparsed = parser.parse_known_args()
tf.compat.v1.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/speech_commands/wav_to_features.py
|
Python
|
apache-2.0
| 6,930
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for data input for speech commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow.examples.speech_commands import wav_to_features
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class WavToFeaturesTest(test.TestCase):
def _getWavData(self):
with self.cached_session():
sample_data = tf.zeros([32000, 2])
wav_encoder = tf.audio.encode_wav(sample_data, 16000)
wav_data = self.evaluate(wav_encoder)
return wav_data
def _saveTestWavFile(self, filename, wav_data):
with open(filename, "wb") as f:
f.write(wav_data)
def _saveWavFolders(self, root_dir, labels, how_many):
wav_data = self._getWavData()
for label in labels:
dir_name = os.path.join(root_dir, label)
os.mkdir(dir_name)
for i in range(how_many):
file_path = os.path.join(dir_name, "some_audio_%d.wav" % i)
self._saveTestWavFile(file_path, wav_data)
@test_util.run_deprecated_v1
def testWavToFeatures(self):
tmp_dir = self.get_temp_dir()
wav_dir = os.path.join(tmp_dir, "wavs")
os.mkdir(wav_dir)
self._saveWavFolders(wav_dir, ["a", "b", "c"], 100)
input_file_path = os.path.join(tmp_dir, "input.wav")
output_file_path = os.path.join(tmp_dir, "output.c")
wav_data = self._getWavData()
self._saveTestWavFile(input_file_path, wav_data)
wav_to_features.wav_to_features(16000, 1000, 10, 10, 40, True, "average",
input_file_path, output_file_path)
with open(output_file_path, "rb") as f:
content = f.read()
self.assertIn(b"const unsigned char g_input_data", content)
@test_util.run_deprecated_v1
def testWavToFeaturesMicro(self):
tmp_dir = self.get_temp_dir()
wav_dir = os.path.join(tmp_dir, "wavs")
os.mkdir(wav_dir)
self._saveWavFolders(wav_dir, ["a", "b", "c"], 100)
input_file_path = os.path.join(tmp_dir, "input.wav")
output_file_path = os.path.join(tmp_dir, "output.c")
wav_data = self._getWavData()
self._saveTestWavFile(input_file_path, wav_data)
wav_to_features.wav_to_features(16000, 1000, 10, 10, 40, True, "micro",
input_file_path, output_file_path)
with open(output_file_path, "rb") as f:
content = f.read()
self.assertIn(b"const unsigned char g_input_data", content)
if __name__ == "__main__":
test.main()
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/speech_commands/wav_to_features_test.py
|
Python
|
apache-2.0
| 3,212
|
{
"cells": [
{
"cell_type": "markdown",
"source": [
"# 训练唤醒词模型"
],
"metadata": {
"colab_type": "text",
"id": "pO4-CY_TCZZS"
}
},
{
"cell_type": "markdown",
"source": [
"This notebook demonstrates how to train a 20 kB [Simple Audio Recognition](https://www.tensorflow.org/tutorials/sequences/audio_recognition) model to recognize keywords in speech.\n",
"\n",
"The model created in this notebook is used in the [micro_speech](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/micro/examples/micro_speech) example for [TensorFlow Lite for MicroControllers](https://www.tensorflow.org/lite/microcontrollers/overview).\n",
"\n"
],
"metadata": {
"colab_type": "text",
"id": "BaFfr7DHRmGF"
}
},
{
"cell_type": "markdown",
"source": [
"**Training is much faster using GPU acceleration.** Before you proceed, ensure you are using a GPU runtime by going to **Runtime -> Change runtime type** and set **Hardware accelerator: GPU**. Training 15,000 iterations will take 1.5 - 2 hours on a GPU runtime.\n",
"\n",
"## 模型配置\n",
"\n",
"**MODIFY** the following constants for your specific use case."
],
"metadata": {
"colab_type": "text",
"id": "XaVtYN4nlCft"
}
},
{
"cell_type": "code",
"execution_count": null,
"source": [
"# A comma-delimited list of the words you want to train for.\n",
"# The options are: yes,no,up,down,left,right,on,off,stop,go\n",
"# All the other words will be used to train an \"unknown\" label and silent\n",
"# audio data with no spoken words will be used to train a \"silence\" label.\n",
"WANTED_WORDS = \"on,off\"\n",
"\n",
"# The number of steps and learning rates can be specified as comma-separated\n",
"# lists to define the rate at each stage. For example,\n",
"# TRAINING_STEPS=12000,3000 and LEARNING_RATE=0.001,0.0001\n",
"# will run 12,000 training loops in total, with a rate of 0.001 for the first\n",
"# 8,000, and 0.0001 for the final 3,000.\n",
"# TRAINING_STEPS = \"15000,3000\"\n",
"TRAINING_STEPS = \"15000,3000\"\n",
"LEARNING_RATE = \"0.001,0.0001\"\n",
"\n",
"# Calculate the total number of steps, which is used to identify the checkpoint\n",
"# file name.\n",
"TOTAL_STEPS = str(sum(map(lambda string: int(string), TRAINING_STEPS.split(\",\"))))\n",
"\n",
"# Print the configuration to confirm it\n",
"print(\"Training these words: %s\" % WANTED_WORDS)\n",
"print(\"Training steps in each stage: %s\" % TRAINING_STEPS)\n",
"print(\"Learning rate in each stage: %s\" % LEARNING_RATE)\n",
"print(\"Total number of training steps: %s\" % TOTAL_STEPS)"
],
"outputs": [],
"metadata": {
"colab": {},
"colab_type": "code",
"id": "ludfxbNIaegy"
}
},
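  {
   "cell_type": "markdown",
   "source": [
    "As a quick sanity check, the added cell below (not part of the original notebook) pairs each training stage with its learning rate, mirroring how `train.py` interprets the two comma-separated lists."
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "# A minimal sketch (an addition), assuming TRAINING_STEPS and LEARNING_RATE\n",
    "# contain the same number of comma-separated entries, as train.py expects.\n",
    "for stage, (steps, rate) in enumerate(\n",
    "    zip(TRAINING_STEPS.split(\",\"), LEARNING_RATE.split(\",\")), start=1):\n",
    "  print(\"Stage %d: %s steps at learning rate %s\" % (stage, steps, rate))"
   ],
   "outputs": [],
   "metadata": {}
  },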
{
"cell_type": "markdown",
"source": [
"**DO NOT MODIFY** the following constants as they include filepaths used in this notebook and data that is shared during training and inference."
],
"metadata": {
"colab_type": "text",
"id": "gCgeOpvY9pAi"
}
},
{
"cell_type": "code",
"execution_count": null,
"source": [
"# Calculate the percentage of 'silence' and 'unknown' training samples required\n",
"# to ensure that we have equal number of samples for each label.\n",
"number_of_labels = WANTED_WORDS.count(',') + 1\n",
"number_of_total_labels = number_of_labels + 2 # for 'silence' and 'unknown' label\n",
"equal_percentage_of_training_samples = int(100.0/(number_of_total_labels))\n",
"SILENT_PERCENTAGE = equal_percentage_of_training_samples\n",
"UNKNOWN_PERCENTAGE = equal_percentage_of_training_samples\n",
"VALIDATION_PERCENTAGE = 10 #10\n",
"TESTING_PERCENTAGE = 10 # 10\n",
"\n",
"print('SILENT_PERCENTAGE : %d' % SILENT_PERCENTAGE)\n",
"print('UNKNOWN_PERCENTAGE : %d' % UNKNOWN_PERCENTAGE)\n",
"print('VALIDATION_PERCENTAGE : %d' % VALIDATION_PERCENTAGE)\n",
"print('TESTING_PERCENTAGE : %d' % TESTING_PERCENTAGE)\n",
"\n",
"# Constants which are shared during training and inference\n",
"PREPROCESS = 'micro'\n",
"WINDOW_STRIDE = 20\n",
"MODEL_ARCHITECTURE = 'tiny_conv' # Other options include: single_fc, conv,\n",
" # low_latency_conv, low_latency_svdf, tiny_embedding_conv\n",
"\n",
"# Constants used during training only\n",
"VERBOSITY = 'WARN'\n",
"EVAL_STEP_INTERVAL = '100' # '1000'\n",
"SAVE_STEP_INTERVAL = '100' # '1000'\n",
"BATCH_SIZE = '64' # default 100\n",
"\n",
"# Constants for training directories and filepaths\n",
"DATASET_DIR = './dataset/'\n",
"\n",
"LOGS_DIR = 'logs/'\n",
"TRAIN_DIR = 'train/' # for training checkpoints and other files.\n",
"\n",
"# Constants for inference directories and filepaths\n",
"import os\n",
"MODELS_DIR = 'models'\n",
"if not os.path.exists(MODELS_DIR):\n",
" os.mkdir(MODELS_DIR)\n",
"MODEL_TF = os.path.join(MODELS_DIR, 'model.pb')\n",
"MODEL_TFLITE = os.path.join(MODELS_DIR, 'model.tflite')\n",
"FLOAT_MODEL_TFLITE = os.path.join(MODELS_DIR, 'float_model.tflite')\n",
"MODEL_TFLITE_MICRO = os.path.join(MODELS_DIR, 'model.cc')\n",
"SAVED_MODEL = os.path.join(MODELS_DIR, 'saved_model')\n",
"\n",
"QUANT_INPUT_MIN = 0.0\n",
"QUANT_INPUT_MAX = 26.0\n",
"QUANT_INPUT_RANGE = QUANT_INPUT_MAX - QUANT_INPUT_MIN"
],
"outputs": [],
"metadata": {
"colab": {},
"colab_type": "code",
"id": "Nd1iM1o2ymvA"
}
},
{
"cell_type": "markdown",
"source": [
"## 环境安装\n",
"\n",
"安装依赖项"
],
"metadata": {
"colab_type": "text",
"id": "6rLYpvtg9P4o"
}
},
{
"cell_type": "code",
"execution_count": null,
"source": [
"#%tensorflow_version 1.x\n",
"import tensorflow as tf"
],
"outputs": [],
"metadata": {
"colab": {},
"colab_type": "code",
"id": "ed_XpUrU5DvY"
}
},
{
"cell_type": "markdown",
"source": [
"**DELETE** any old data from previous runs\n"
],
"metadata": {
"colab_type": "text",
"id": "T9Ty5mR58E4i"
}
},
{
"cell_type": "code",
"execution_count": null,
"source": [
"#!rm -rf {DATASET_DIR} {LOGS_DIR} {TRAIN_DIR} {MODELS_DIR}\n",
"!rm -rf {LOGS_DIR} {TRAIN_DIR} {MODELS_DIR}"
],
"outputs": [],
"metadata": {
"colab": {},
"colab_type": "code",
"id": "APGx0fEh7hFF"
}
},
{
"cell_type": "markdown",
"source": [
"Clone the TensorFlow Github Repository, which contains the relevant code required to run this tutorial."
],
"metadata": {
"colab_type": "text",
"id": "GfEUlfFBizio"
}
},
{
"cell_type": "code",
"execution_count": null,
"source": [
"#!git clone -q --depth 1 https://github.com/tensorflow/tensorflow"
],
"outputs": [],
"metadata": {
"colab": {},
"colab_type": "code",
"id": "yZArmzT85SLq"
}
},
{
"cell_type": "markdown",
"source": [
"Load TensorBoard to visualize the accuracy and loss as training proceeds.\n"
],
"metadata": {
"colab_type": "text",
"id": "nS9swHLSi7Bi"
}
},
{
"cell_type": "code",
"execution_count": null,
"source": [
"#%load_ext tensorboard\n",
"#%tensorboard --logdir {LOGS_DIR}"
],
"outputs": [],
"metadata": {
"colab": {},
"colab_type": "code",
"id": "q4qF1VxP3UE4"
}
},
{
"cell_type": "markdown",
"source": [
"## 模型训练\n",
"\n",
"The following script downloads the dataset and begin training."
],
"metadata": {
"colab_type": "text",
"id": "x1J96Ron-O4R"
}
},
{
"cell_type": "code",
"execution_count": null,
"source": [
"!python ./speech_commands/train.py \\\n",
"--data_dir={DATASET_DIR} \\\n",
"--wanted_words={WANTED_WORDS} \\\n",
"--silence_percentage={SILENT_PERCENTAGE} \\\n",
"--unknown_percentage={UNKNOWN_PERCENTAGE} \\\n",
"--validation_percentage={VALIDATION_PERCENTAGE} \\\n",
"--testing_percentage={TESTING_PERCENTAGE} \\\n",
"--batch_size={BATCH_SIZE} \\\n",
"--preprocess={PREPROCESS} \\\n",
"--window_stride={WINDOW_STRIDE} \\\n",
"--model_architecture={MODEL_ARCHITECTURE} \\\n",
"--how_many_training_steps={TRAINING_STEPS} \\\n",
"--learning_rate={LEARNING_RATE} \\\n",
"--train_dir={TRAIN_DIR} \\\n",
"--summaries_dir={LOGS_DIR} \\\n",
"--verbosity={VERBOSITY} \\\n",
"--eval_step_interval={EVAL_STEP_INTERVAL} \\\n",
"--save_step_interval={SAVE_STEP_INTERVAL}"
],
"outputs": [],
"metadata": {
"colab": {},
"colab_type": "code",
"id": "VJsEZx6lynbY"
}
},
{
"cell_type": "markdown",
"source": [
"## 生成tensorflow模型\n",
"\n",
"Combine relevant training results (graph, weights, etc) into a single file for inference. This process is known as freezing a model and the resulting model is known as a frozen model/graph, as it cannot be further re-trained after this process."
],
"metadata": {
"colab_type": "text",
"id": "XQUJLrdS-ftl"
}
},
{
"cell_type": "code",
"execution_count": null,
"source": [
"!rm -rf {SAVED_MODEL}\n",
"!python ./speech_commands/freeze.py \\\n",
"--wanted_words=$WANTED_WORDS \\\n",
"--window_stride_ms=$WINDOW_STRIDE \\\n",
"--preprocess=$PREPROCESS \\\n",
"--model_architecture=$MODEL_ARCHITECTURE \\\n",
"--start_checkpoint=$TRAIN_DIR$MODEL_ARCHITECTURE'.ckpt-'{TOTAL_STEPS} \\\n",
"--save_format=saved_model \\\n",
"--output_file={SAVED_MODEL}"
],
"outputs": [],
"metadata": {
"colab": {},
"colab_type": "code",
"id": "xyc3_eLh9sAg"
}
},
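  {
   "cell_type": "markdown",
   "source": [
    "Optionally, inspect the exported SavedModel before converting it. This added cell assumes the `saved_model_cli` tool bundled with TensorFlow is available on the PATH; skip it if it is not."
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "# Assumption: saved_model_cli ships with the TensorFlow install and is on PATH.\n",
    "!saved_model_cli show --dir {SAVED_MODEL} --all"
   ],
   "outputs": [],
   "metadata": {}
  },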
{
"cell_type": "markdown",
"source": [
"## 生成Tensorflow Lite模型\n",
"\n",
"Convert the frozen graph into a TensorFlow Lite model, which is fully quantized for use with embedded devices.\n",
"\n",
"The following cell will also print the model size, which will be under 20 kilobytes."
],
"metadata": {
"colab_type": "text",
"id": "_DBGDxVI-nKG"
}
},
{
"cell_type": "code",
"execution_count": null,
"source": [
"import sys\n",
"# We add this path so we can import the speech processing modules.\n",
"sys.path.append(\"./speech_commands/\")\n",
"import input_data\n",
"import models\n",
"import numpy as np"
],
"outputs": [],
"metadata": {
"colab": {},
"colab_type": "code",
"id": "RIitkqvGWmre"
}
},
{
"cell_type": "code",
"execution_count": null,
"source": [
"SAMPLE_RATE = 16000\n",
"CLIP_DURATION_MS = 1000\n",
"WINDOW_SIZE_MS = 30.0\n",
"FEATURE_BIN_COUNT = 40\n",
"BACKGROUND_FREQUENCY = 0.8\n",
"BACKGROUND_VOLUME_RANGE = 0.1\n",
"TIME_SHIFT_MS = 100.0\n",
"\n",
"DATA_URL = 'https://storage.googleapis.com/download.tensorflow.org/data/speech_commands_v0.02.tar.gz'\n",
"VALIDATION_PERCENTAGE = 10 #10\n",
"TESTING_PERCENTAGE = 10 # 10"
],
"outputs": [],
"metadata": {
"colab": {},
"colab_type": "code",
"id": "kzqECqMxgBh4"
}
},
{
"cell_type": "code",
"execution_count": null,
"source": [
"model_settings = models.prepare_model_settings(\n",
" len(input_data.prepare_words_list(WANTED_WORDS.split(','))),\n",
" SAMPLE_RATE, CLIP_DURATION_MS, WINDOW_SIZE_MS,\n",
" WINDOW_STRIDE, FEATURE_BIN_COUNT, PREPROCESS)\n",
"audio_processor = input_data.AudioProcessor(\n",
" DATA_URL, DATASET_DIR,\n",
" SILENT_PERCENTAGE, UNKNOWN_PERCENTAGE,\n",
" WANTED_WORDS.split(','), VALIDATION_PERCENTAGE,\n",
" TESTING_PERCENTAGE, model_settings, LOGS_DIR)"
],
"outputs": [],
"metadata": {
"colab": {},
"colab_type": "code",
"id": "rNQdAplJV1fz"
}
},
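  {
   "cell_type": "markdown",
   "source": [
    "As an optional check (an addition to the original flow), the cell below prints how many samples the `AudioProcessor` assigned to each partition, using its `set_size` method."
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "# Sketch (an addition): report the number of samples in each data partition.\n",
    "for partition in ('training', 'validation', 'testing'):\n",
    "  print('%s set size: %d' % (partition, audio_processor.set_size(partition)))"
   ],
   "outputs": [],
   "metadata": {}
  },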
{
"cell_type": "code",
"execution_count": null,
"source": [
"with tf.Session() as sess:\n",
" float_converter = tf.lite.TFLiteConverter.from_saved_model(SAVED_MODEL)\n",
" float_tflite_model = float_converter.convert()\n",
" float_tflite_model_size = open(FLOAT_MODEL_TFLITE, \"wb\").write(float_tflite_model)\n",
" print(\"Float model is %d bytes\" % float_tflite_model_size)\n",
"\n",
" converter = tf.lite.TFLiteConverter.from_saved_model(SAVED_MODEL)\n",
" converter.optimizations = [tf.lite.Optimize.DEFAULT]\n",
" converter.inference_input_type = tf.lite.constants.INT8\n",
" converter.inference_output_type = tf.lite.constants.INT8\n",
" def representative_dataset_gen():\n",
" set_size = audio_processor.set_size('testing') #get test set size\n",
" for i in range(set_size): # change 100 to set_size\n",
" data, _ = audio_processor.get_data(1, i*1, model_settings,\n",
" BACKGROUND_FREQUENCY, \n",
" BACKGROUND_VOLUME_RANGE,\n",
" TIME_SHIFT_MS,\n",
" 'testing',\n",
" sess)\n",
" flattened_data = np.array(data.flatten(), dtype=np.float32).reshape(1, 1960)\n",
" yield [flattened_data]\n",
" converter.representative_dataset = representative_dataset_gen\n",
" tflite_model = converter.convert()\n",
" tflite_model_size = open(MODEL_TFLITE, \"wb\").write(tflite_model)\n",
" print(\"Quantized model is %d bytes\" % tflite_model_size)\n"
],
"outputs": [],
"metadata": {
"colab": {},
"colab_type": "code",
"id": "lBj_AyCh1cC0"
}
},
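  {
   "cell_type": "markdown",
   "source": [
    "To confirm that quantization took effect, this added cell loads the converted model with the TF Lite interpreter and prints the input and output tensor types; both should come back as int8."
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "# Sketch (an addition): verify the quantized model's tensor types.\n",
    "interpreter = tf.lite.Interpreter(model_path=MODEL_TFLITE)\n",
    "interpreter.allocate_tensors()\n",
    "print('Input dtype :', interpreter.get_input_details()[0]['dtype'])\n",
    "print('Output dtype:', interpreter.get_output_details()[0]['dtype'])"
   ],
   "outputs": [],
   "metadata": {}
  },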
{
"cell_type": "markdown",
"source": [
"## 测试TensorFlow Lite model的精确度\n",
"\n",
"Verify that the model we've exported is still accurate, using the TF Lite Python API and our test set."
],
"metadata": {
"colab_type": "text",
"id": "EeLiDZTbLkzv"
}
},
{
"cell_type": "code",
"execution_count": null,
"source": [
"# Helper function to run inference\n",
"def run_tflite_inference(tflite_model_path, model_type=\"Float\"):\n",
" # Load test data\n",
" np.random.seed(0) # set random seed for reproducible test results.\n",
" with tf.Session() as sess:\n",
" test_data, test_labels = audio_processor.get_data(\n",
" -1, 0, model_settings, BACKGROUND_FREQUENCY, BACKGROUND_VOLUME_RANGE,\n",
" TIME_SHIFT_MS, 'testing', sess)\n",
" test_data = np.expand_dims(test_data, axis=1).astype(np.float32)\n",
"\n",
" # Initialize the interpreter\n",
" interpreter = tf.lite.Interpreter(tflite_model_path)\n",
" interpreter.allocate_tensors()\n",
"\n",
" input_details = interpreter.get_input_details()[0]\n",
" output_details = interpreter.get_output_details()[0]\n",
"\n",
" # For quantized models, manually quantize the input data from float to integer\n",
" if model_type == \"Quantized\":\n",
" input_scale, input_zero_point = input_details[\"quantization\"]\n",
" test_data = test_data / input_scale + input_zero_point\n",
" test_data = test_data.astype(input_details[\"dtype\"])\n",
"\n",
" correct_predictions = 0\n",
" for i in range(len(test_data)):\n",
" interpreter.set_tensor(input_details[\"index\"], test_data[i])\n",
" interpreter.invoke()\n",
" output = interpreter.get_tensor(output_details[\"index\"])[0]\n",
" top_prediction = output.argmax()\n",
" correct_predictions += (top_prediction == test_labels[i])\n",
"\n",
" print('%s model accuracy is %f%% (Number of test samples=%d)' % (\n",
" model_type, (correct_predictions * 100) / len(test_data), len(test_data)))"
],
"outputs": [],
"metadata": {
"colab": {},
"colab_type": "code",
"id": "wQsEteKRLryJ"
}
},
{
"cell_type": "code",
"execution_count": null,
"source": [
"# Compute float model accuracy\n",
"run_tflite_inference(FLOAT_MODEL_TFLITE)\n",
"\n",
"# Compute quantized model accuracy\n",
"run_tflite_inference(MODEL_TFLITE, model_type='Quantized')"
],
"outputs": [],
"metadata": {
"colab": {},
"colab_type": "code",
"id": "l-pD52Na6jRa"
}
},
{
"cell_type": "markdown",
"source": [
"## 模型转换\n",
"Convert the TensorFlow Lite model into a C source file that can be loaded by TensorFlow Lite for Microcontrollers."
],
"metadata": {
"colab_type": "text",
"id": "dt6Zqbxu-wIi"
}
},
{
"cell_type": "code",
"execution_count": null,
"source": [
"# Install xxd if it is not available\n",
"#!apt-get update && apt-get -qq install xxd\n",
"# Convert to a C source file\n",
"!xxd -i {MODEL_TFLITE} > {MODEL_TFLITE_MICRO}\n",
"# Update variable names\n",
"REPLACE_TEXT = MODEL_TFLITE.replace('/', '_').replace('.', '_')\n",
"!sed -i 's/'{REPLACE_TEXT}'/g_model/g' {MODEL_TFLITE_MICRO}"
],
"outputs": [],
"metadata": {
"colab": {},
"colab_type": "code",
"id": "XohZOTjR8ZyE"
}
},
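  {
   "cell_type": "markdown",
   "source": [
    "For reference, this added cell reports the on-disk size of each generated artifact; the quantized `.tflite` file is the one expected to come in under 20 kilobytes."
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "# Sketch (an addition): print the size of each exported model file.\n",
    "import os\n",
    "for path in (FLOAT_MODEL_TFLITE, MODEL_TFLITE, MODEL_TFLITE_MICRO):\n",
    "  print('%s: %d bytes' % (path, os.path.getsize(path)))"
   ],
   "outputs": [],
   "metadata": {}
  },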
{
"cell_type": "markdown",
"source": [
"## 部署到微控制器\n",
"\n",
"Follow the instructions in the [micro_speech](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/micro/examples/micro_speech) README.md for [TensorFlow Lite for MicroControllers](https://www.tensorflow.org/lite/microcontrollers/overview) to deploy this model on a specific microcontroller.\n",
"\n",
"**Reference Model:** If you have not modified this notebook, you can follow the instructions as is, to deploy the model. Refer to the [`micro_speech/train/models`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/micro/examples/micro_speech/train/models) directory to access the models generated in this notebook.\n",
"\n",
"**New Model:** If you have generated a new model to identify different words: (i) Update `kCategoryCount` and `kCategoryLabels` in [`micro_speech/micro_features/micro_model_settings.h`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h) and (ii) Update the values assigned to the variables defined in [`micro_speech/micro_features/model.cc`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/micro/examples/micro_speech/micro_features/model.cc) with values displayed after running the following cell."
],
"metadata": {
"colab_type": "text",
"id": "2pQnN0i_-0L2"
}
},
{
"cell_type": "code",
"execution_count": null,
"source": [
"# Print the C source file\n",
"!cat {MODEL_TFLITE_MICRO}"
],
"outputs": [],
"metadata": {
"colab": {},
"colab_type": "code",
"id": "eoYyh0VU8pca"
}
},
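  {
   "cell_type": "markdown",
   "source": [
    "If you trained on new words, the added sketch below derives the label list and count that `kCategoryLabels` and `kCategoryCount` would need. The exact `'silence'`/`'unknown'` strings are an assumption based on the header's existing convention; the order must match training."
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "# Sketch (an addition): derive the label list for micro_model_settings.h.\n",
    "# The 'silence'/'unknown' strings are an assumption from the header's\n",
    "# existing convention; the order must match training.\n",
    "labels = ['silence', 'unknown'] + WANTED_WORDS.split(',')\n",
    "print('kCategoryCount = %d' % len(labels))\n",
    "print('kCategoryLabels = %s' % labels)"
   ],
   "outputs": [],
   "metadata": {}
  },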
{
"cell_type": "markdown",
"source": [
"# 生成测试音频流\n",
"用于测试TFLite模型,可选。"
],
"metadata": {}
},
{
"cell_type": "code",
"execution_count": null,
"source": [
"!python ./speech_commands/generate_streaming_test_wav.py \\\n",
" --wanted_words='on,off' \\\n",
" --data_dir=./my_only_dataset --background_dir=./my_only_dataset/_background_noise_ \\\n",
" --background_volume=0.1 --test_duration_seconds=600 \\\n",
" --output_audio_file=./tmp/streaming_test.wav \\\n",
" --output_labels_file=./tmp/streaming_test_labels.txt"
],
"outputs": [],
"metadata": {}
},
{
"cell_type": "markdown",
"source": [
"# 测试音频流准确率"
],
"metadata": {}
},
{
"cell_type": "code",
"execution_count": null,
"source": [
"!python ./speech_commands/test_streaming_accuracy.py \\\n",
" --wav=./tmp/streaming_test.wav \\\n",
" --ground-truth=./tmp/streaming_test_labels.txt --verbose \\\n",
" --model=./models/saved_model/ \\\n",
" --labels=./train/tiny_conv_labels.txt \\\n",
" --clip_duration_ms=1000 --detection_threshold=0.70 --average_window_ms=500 \\\n",
" --suppression_ms=500 --time_tolerance_ms=1500"
],
"outputs": [],
"metadata": {}
}
],
"metadata": {
"accelerator": "GPU",
"colab": {
"collapsed_sections": [],
"name": "train_micro_speech_model.ipynb",
"provenance": [],
"toc_visible": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3.8.6 64-bit ('ai': conda)"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.6"
},
"interpreter": {
"hash": "abb811d8b1be5969553a0033f5a252013244c1763569cb77f5209b62dcc65a34"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/train/train_micro_speech_model.ipynb
|
Jupyter Notebook
|
apache-2.0
| 20,969
|
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// See the header for documentation on the meaning of this data.
#include "tensorflow/lite/micro/examples/micro_speech/yes_1000ms_sample_data.h"
const int g_yes_1000ms_sample_data_size = 16000;
const int16_t g_yes_1000ms_sample_data[16000] = {
-7, -12, -18, -20, -20, -21, -21, -25, -29,
-31, -31, -30, -30, -29, -30, -30, -29, -28,
-24, -22, -17, -12, -8, -7, -6, -1, 2,
5, 7, 8, 11, 15, 18, 19, 23, 24,
24, 27, 27, 26, 25, 28, 30, 32, 33,
31, 29, 27, 28, 30, 28, 26, 26, 24,
22, 17, 16, 15, 13, 10, 5, 0, -4,
-4, -7, -9, -12, -14, -14, -13, -11, -10,
-8, -6, -3, 3, 7, 8, 12, 15, 18,
21, 19, 19, 21, 23, 24, 23, 22, 19,
17, 11, 5, -3, -12, -22, -28, -35, -45,
-54, -62, -69, -76, -84, -92, -100, -109, -116,
-117, -120, -120, -120, -122, -124, -126, -123, -121,
-116, -113, -107, -97, -88, -75, -61, -50, -41,
-27, -12, 4, 21, 37, 58, 76, 93, 108,
121, 137, 156, 172, 184, 196, 205, 215, 224,
235, 242, 245, 242, 240, 238, 231, 223, 214,
205, 195, 178, 158, 135, 112, 90, 69, 46,
19, -11, -45, -76, -105, -133, -159, -186, -211,
-236, -260, -280, -294, -308, -320, -331, -336, -338,
-335, -326, -316, -301, -286, -267, -246, -225, -203,
-180, -154, -124, -91, -59, -34, -8, 19, 42,
64, 87, 103, 119, 134, 148, 162, 174, 182,
188, 190, 189, 187, 184, 180, 177, 171, 162,
154, 144, 137, 129, 118, 106, 95, 81, 69,
58, 48, 37, 26, 14, 3, -7, -22, -31,
-42, -52, -62, -69, -75, -79, -82, -87, -88,
-92, -94, -91, -87, -85, -81, -74, -70, -64,
-55, -47, -40, -33, -25, -19, -12, -6, -4,
-1, 1, 1, -2, -9, -15, -17, -18, -20,
-22, -22, -26, -31, -33, -35, -31, -26, -17,
-4, 8, 19, 31, 44, 54, 64, 71, 79,
86, 92, 102, 109, 111, 109, 104, 96, 84,
70, 60, 51, 38, 27, 13, 4, -3, -9,
-13, -18, -26, -33, -32, -27, -20, -10, -4,
2, 6, 10, 14, 16, 21, 25, 29, 31,
33, 35, 37, 33, 22, 15, 13, 11, 12,
9, 5, 2, 1, -3, -9, -17, -27, -32,
-35, -36, -36, -42, -50, -56, -66, -77, -85,
-96, -100, -106, -113, -118, -121, -119, -117, -119,
-122, -124, -123, -112, -94, -77, -64, -51, -37,
-22, -3, 17, 37, 54, 68, 86, 100, 114,
134, 154, 167, 174, 178, 182, 189, 189, 187,
185, 179, 177, 174, 171, 157, 138, 123, 108,
94, 76, 50, 25, 6, -8, -20, -37, -59,
-86, -110, -132, -147, -159, -169, -178, -191, -203,
-213, -217, -215, -208, -199, -194, -195, -190, -178,
-165, -155, -144, -134, -123, -103, -80, -56, -35,
-18, -4, 11, 23, 36, 50, 65, 78, 93,
111, 122, 129, 132, 131, 127, 125, 126, 126,
128, 127, 125, 122, 118, 111, 108, 104, 99,
93, 89, 90, 87, 82, 78, 75, 68, 65,
67, 69, 66, 61, 54, 39, 28, 15, 3,
-7, -18, -25, -29, -35, -42, -52, -66, -78,
-83, -85, -86, -86, -82, -83, -84, -83, -81,
-75, -62, -57, -53, -49, -46, -41, -34, -26,
-16, -10, -7, -2, 2, 6, 12, 15, 19,
18, 15, 17, 21, 24, 30, 33, 27, 22,
21, 20, 23, 24, 21, 15, 13, 8, 3,
1, -1, -3, -4, -6, -9, -11, -11, -8,
-10, -13, -15, -19, -17, -11, -2, 1, 2,
6, 9, 10, 12, 13, 9, 8, 10, 13,
20, 18, 13, 10, 4, 1, -2, -6, -11,
-13, -16, -18, -15, -18, -21, -21, -22, -23,
-25, -23, -22, -20, -19, -16, -12, -10, -9,
-11, -15, -19, -22, -19, -14, -11, -9, -11,
-17, -20, -18, -19, -15, -11, -8, -2, 8,
19, 30, 36, 37, 36, 38, 45, 57, 69,
77, 81, 79, 75, 76, 74, 69, 66, 60,
53, 45, 36, 28, 22, 17, 10, 0, -5,
-11, -15, -18, -26, -31, -33, -34, -34, -35,
-37, -37, -35, -28, -24, -29, -37, -45, -46,
-41, -36, -31, -32, -33, -37, -37, -36, -36,
-34, -27, -19, -14, -11, -8, -1, 6, 14,
19, 21, 25, 30, 34, 38, 38, 33, 26,
22, 19, 20, 18, 17, 15, 10, 2, -3,
-5, -10, -13, -13, -13, -16, -16, -16, -15,
-13, -14, -13, -16, -19, -20, -18, -17, -18,
-16, -16, -24, -28, -28, -28, -23, -21, -21,
-20, -24, -27, -23, -18, -14, -7, 4, 11,
15, 19, 21, 25, 33, 39, 41, 45, 47,
50, 56, 58, 57, 59, 59, 55, 50, 47,
39, 34, 30, 24, 18, 11, 8, 3, 0,
-3, -8, -14, -15, -13, -13, -12, -14, -17,
-17, -12, -10, -4, -7, -12, -10, -14, -17,
-17, -19, -25, -28, -27, -29, -30, -31, -35,
-38, -43, -47, -51, -52, -50, -49, -48, -47,
-45, -39, -32, -30, -31, -35, -35, -31, -24,
-17, -12, -11, -14, -15, -17, -16, -9, -5,
-3, -1, 0, 1, 0, 3, 12, 21, 26,
33, 35, 38, 45, 50, 53, 53, 54, 58,
61, 64, 69, 67, 66, 64, 58, 54, 51,
46, 44, 45, 41, 35, 31, 27, 25, 27,
25, 20, 13, 12, 16, 17, 17, 12, 7,
3, 2, -2, -4, -8, -14, -19, -25, -29,
-38, -49, -60, -69, -73, -71, -74, -82, -89,
-98, -103, -104, -103, -99, -98, -98, -98, -99,
-97, -94, -91, -85, -82, -78, -74, -74, -71,
-68, -61, -54, -52, -47, -41, -36, -32, -21,
-12, -3, 11, 26, 36, 44, 48, 55, 64,
77, 92, 100, 108, 117, 120, 122, 128, 130,
129, 130, 127, 124, 122, 121, 118, 114, 110,
102, 92, 85, 80, 77, 68, 55, 46, 39,
36, 34, 31, 27, 15, 5, -1, -5, -11,
-20, -29, -37, -43, -46, -47, -54, -61, -65,
-74, -82, -84, -91, -94, -96, -104, -109, -111,
-111, -112, -113, -111, -112, -110, -104, -99, -96,
-93, -89, -87, -81, -71, -63, -54, -45, -43,
-37, -30, -24, -17, -12, -8, -2, 2, 15,
23, 28, 35, 41, 42, 44, 52, 58, 66,
74, 78, 80, 82, 85, 88, 90, 92, 92,
88, 87, 87, 79, 73, 69, 64, 62, 55,
50, 45, 41, 36, 29, 24, 20, 16, 12,
8, 5, 2, 1, 1, 0, 1, -4, -4,
-4, -4, -1, 1, 2, 1, -3, -6, -1,
5, 6, 7, 8, 4, 2, 0, -2, -3,
0, -3, -4, -3, -4, -5, -8, -15, -20,
-25, -28, -32, -37, -38, -39, -43, -48, -55,
-62, -69, -75, -75, -78, -81, -83, -89, -89,
-92, -91, -91, -89, -83, -81, -74, -66, -63,
-54, -45, -39, -31, -23, -15, -4, 6, 14,
23, 29, 35, 41, 45, 49, 55, 61, 69,
75, 75, 76, 75, 74, 74, 73, 74, 72,
69, 69, 65, 62, 57, 52, 44, 35, 33,
29, 24, 14, 7, 3, -4, -12, -17, -20,
-22, -27, -32, -34, -39, -42, -43, -42, -43,
-40, -38, -36, -36, -37, -36, -33, -31, -27,
-24, -23, -22, -17, -11, -7, -7, -7, -3,
5, 13, 19, 25, 27, 25, 27, 35, 40,
40, 41, 45, 47, 50, 54, 52, 50, 45,
43, 44, 40, 34, 28, 24, 18, 11, 6,
-2, -9, -14, -21, -27, -35, -39, -43, -50,
-57, -62, -66, -68, -71, -72, -73, -74, -76,
-76, -77, -75, -75, -74, -67, -61, -55, -49,
-45, -40, -30, -21, -11, -4, 4, 13, 23,
34, 44, 52, 59, 65, 70, 77, 84, 87,
88, 90, 91, 90, 89, 85, 80, 75, 72,
71, 64, 56, 48, 41, 34, 27, 21, 12,
1, -11, -19, -28, -33, -39, -46, -50, -53,
-58, -63, -66, -71, -73, -76, -76, -74, -73,
-71, -67, -65, -62, -60, -55, -51, -45, -39,
-35, -31, -27, -20, -13, -6, -3, 1, 8,
12, 18, 24, 26, 30, 35, 38, 44, 47,
47, 51, 53, 52, 53, 52, 50, 51, 49,
50, 51, 50, 48, 48, 45, 43, 42, 37,
34, 31, 31, 30, 26, 24, 21, 15, 12,
11, 7, 4, 1, -3, -5, -7, -9, -15,
-21, -26, -28, -31, -35, -39, -46, -48, -49,
-53, -58, -63, -67, -69, -71, -72, -74, -75,
-77, -77, -73, -72, -69, -65, -60, -55, -50,
-47, -43, -38, -30, -25, -20, -12, -4, 4,
9, 16, 20, 24, 28, 35, 43, 50, 58,
61, 65, 72, 74, 74, 76, 79, 78, 76,
78, 76, 76, 74, 70, 64, 59, 52, 46,
41, 33, 26, 19, 12, 5, -2, -8, -15,
-20, -26, -31, -37, -39, -41, -44, -44, -47,
-51, -52, -52, -48, -45, -46, -48, -45, -42,
-40, -36, -32, -27, -24, -22, -18, -16, -11,
-10, -5, 0, 3, 8, 11, 16, 18, 21,
23, 25, 26, 27, 28, 30, 31, 31, 30,
29, 27, 26, 23, 19, 17, 13, 10, 6,
0, -2, -5, -10, -12, -15, -19, -23, -26,
-29, -30, -30, -32, -33, -34, -35, -34, -31,
-29, -29, -28, -28, -23, -19, -17, -12, -12,
-10, -5, -2, 3, 7, 10, 13, 14, 19,
22, 26, 31, 34, 34, 35, 36, 39, 43,
45, 47, 47, 48, 49, 51, 48, 47, 50,
45, 41, 41, 38, 34, 34, 30, 23, 17,
11, 7, 4, -4, -9, -15, -23, -28, -32,
-35, -39, -45, -46, -49, -53, -52, -53, -55,
-56, -56, -55, -54, -53, -53, -51, -47, -44,
-42, -40, -37, -33, -28, -25, -23, -18, -15,
-8, -6, -2, 3, 8, 15, 18, 23, 26,
27, 32, 36, 36, 36, 39, 38, 38, 40,
39, 35, 31, 29, 25, 23, 19, 15, 11,
7, 5, 3, 1, -1, -6, -8, -7, -10,
-9, -10, -11, -10, -7, -6, -8, -6, -5,
-4, 1, 2, 4, 7, 7, 9, 11, 11,
9, 9, 10, 11, 13, 17, 15, 15, 15,
17, 19, 17, 17, 17, 15, 15, 13, 11,
12, 8, 7, 5, 3, 0, -4, -4, -6,
-9, -12, -14, -15, -15, -16, -20, -19, -20,
-20, -20, -18, -18, -21, -22, -21, -21, -23,
-20, -20, -23, -24, -23, -25, -25, -25, -25,
-26, -24, -23, -23, -23, -23, -22, -19, -18,
-15, -14, -10, -8, -4, -1, 1, 3, 6,
8, 9, 14, 19, 22, 24, 26, 29, 32,
31, 34, 39, 42, 42, 46, 49, 50, 50,
52, 53, 52, 49, 49, 48, 48, 46, 45,
40, 34, 30, 25, 21, 17, 13, 10, 6,
2, -4, -9, -12, -15, -18, -21, -26, -28,
-31, -32, -33, -35, -35, -38, -37, -36, -34,
-35, -35, -33, -33, -34, -30, -26, -27, -25,
-23, -22, -18, -15, -16, -12, -9, -9, -6,
-1, 2, 3, 5, 8, 7, 9, 12, 15,
17, 18, 18, 19, 18, 20, 19, 18, 21,
20, 19, 18, 16, 15, 15, 15, 14, 12,
9, 9, 10, 8, 6, 4, 2, 1, -1,
-3, -1, -3, -2, -4, -5, -5, -8, -8,
-10, -10, -8, -8, -8, -7, -8, -8, -8,
-9, -11, -12, -11, -9, -7, -8, -8, -8,
-10, -8, -7, -8, -7, -6, -7, -5, -3,
-3, -3, -3, -2, 0, 3, 3, 5, 7,
10, 11, 10, 10, 12, 13, 16, 16, 16,
17, 15, 16, 17, 16, 14, 16, 13, 11,
11, 9, 9, 6, 4, 4, 3, 0, -2,
-4, -7, -7, -7, -13, -15, -13, -14, -16,
-15, -15, -17, -16, -16, -18, -19, -19, -20,
-19, -16, -15, -13, -12, -10, -7, -6, -4,
-4, -2, 0, 2, 6, 8, 10, 12, 14,
15, 14, 13, 13, 13, 15, 15, 17, 17,
17, 18, 17, 16, 15, 15, 14, 11, 9,
8, 8, 9, 8, 5, 5, 3, -1, -1,
-4, -5, -7, -8, -8, -8, -9, -10, -8,
-11, -12, -12, -12, -12, -13, -11, -11, -9,
-8, -7, -8, -7, -6, -7, -6, -5, -4,
-4, -2, -2, -3, -2, -2, -3, 0, -1,
-3, 1, 1, 2, 4, 3, 5, 6, 3,
3, 4, 3, 3, 4, 5, 4, 6, 7,
7, 7, 6, 3, 3, 5, 3, 3, 6,
6, 7, 6, 4, 5, 2, 1, 1, 0,
0, 2, 1, 1, 1, -1, -2, -3, -5,
-4, -5, -4, -4, -6, -4, -4, -4, -5,
-6, -5, -6, -5, -4, -5, -4, -3, -4,
0, 2, 2, 2, 2, 2, 2, 3, 3,
5, 6, 6, 5, 6, 7, 6, 8, 6,
5, 5, 5, 6, 6, 6, 5, 5, 2,
2, 1, 2, 0, -1, -1, -1, -1, 0,
-1, -4, -6, -8, -8, -9, -8, -7, -6,
-5, -5, -6, -3, -4, -5, -4, -7, -6,
-4, -2, -1, -1, 1, 1, 1, 1, 1,
2, 2, 1, 3, 4, 4, 6, 6, 6,
6, 4, 4, 4, 4, 3, 2, 2, 2,
2, 1, 1, 1, 0, 1, 1, 0, -2,
-2, -3, -3, -3, -3, -5, -4, -3, -5,
-5, -3, -5, -4, -4, -2, -2, -2, -1,
-3, -2, -2, -1, -3, -2, -1, -2, -2,
-2, 0, 0, 0, 0, 0, 1, 0, 0,
1, 2, 3, 3, 3, 4, 5, 4, 3,
4, 5, 5, 7, 7, 6, 9, 8, 6,
7, 8, 6, 5, 7, 8, 8, 8, 7,
6, 5, 4, 4, 4, 5, 4, 2, 1,
2, 1, 0, -2, -3, -2, -4, -6, -6,
-7, -7, -8, -9, -9, -9, -9, -9, -9,
-9, -10, -10, -10, -8, -7, -8, -6, -5,
-4, -3, -5, -2, -2, -2, -1, -1, 0,
1, 1, 2, 3, 2, 4, 3, 3, 5,
3, 3, 5, 4, 5, 6, 5, 4, 5,
3, 2, 2, 3, 4, 4, 4, 4, 4,
3, 4, 4, 4, 3, 2, 2, 2, 2,
2, 2, 2, 2, 1, 1, 1, 2, 1,
1, 2, 1, 1, 2, 1, 1, 1, -1,
0, 1, 0, -1, 1, -1, -1, -1, -2,
-1, -1, -1, -1, -1, -1, -1, -1, -2,
-1, 0, -1, -1, 1, 1, 2, 0, -1,
0, -1, -1, 0, 0, 1, 2, 2, 2,
1, 1, 0, 0, 0, 0, 1, 1, 0,
0, 0, 0, 0, -1, -2, -1, -3, -4,
-4, -4, -4, -4, -4, -4, -3, -3, -5,
-6, -4, -2, -2, -1, -1, -1, -2, 1,
-1, 1, 0, 0, 1, 1, 1, 1, 2,
1, 2, 2, 3, 3, 3, 3, 4, 5,
5, 5, 5, 5, 5, 5, 5, 6, 6,
5, 5, 5, 6, 6, 5, 3, 6, 5,
4, 5, 3, 2, 2, 2, 2, 1, 1,
2, 0, -1, 0, -1, -1, -1, -1, -1,
-1, -1, -3, -3, -3, -3, -4, -4, -5,
-6, -6, -6, -6, -6, -6, -5, -5, -6,
-5, -4, -4, -4, -4, -2, -2, -2, -1,
-2, 0, 1, 0, 1, 3, 4, 4, 4,
4, 4, 4, 5, 4, 4, 4, 5, 7,
5, 4, 4, 4, 4, 3, 2, 2, 2,
2, 2, 0, 1, 1, 0, 1, 1, -1,
0, -1, -2, -1, -3, -4, -4, -3, -5,
-5, -5, -5, -5, -5, -4, -3, -3, -2,
-3, -2, -2, -5, -3, -3, -3, -2, 0,
1, 1, 1, 1, 1, 1, 1, 1, 3,
3, 4, 4, 4, 4, 5, 5, 2, 3,
4, 3, 5, 4, 3, 4, 3, 3, 5,
5, 3, 4, 2, 1, 1, 3, 4, 3,
1, 3, 2, 1, 2, 1, 0, 1, 0,
1, 0, 1, 1, 1, 1, 0, -1, 0,
0, -1, -1, -2, -1, -1, -2, 0, -1,
-2, -1, -1, -2, -2, -1, -3, -3, -3,
-3, -3, -4, -3, -5, -6, -4, -4, -5,
-4, -3, -5, -6, -4, -5, -6, -4, -3,
-5, -4, -3, -4, -3, -2, -2, -2, 0,
0, 1, 1, 0, 0, 0, 1, 1, 3,
3, 3, 4, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3,
1, 1, 1, 1, 1, 1, 1, 0, 0,
0, 1, -2, -1, 1, 0, -1, -2, -2,
0, 1, 0, 1, 1, 1, 1, 0, 0,
1, 0, 0, 2, 1, 0, 1, 1, 1,
1, 3, 3, 3, 4, 3, 3, 4, 2,
2, 2, 2, 2, 2, 2, 1, 2, 2,
2, 2, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -3, -3, -3, -5, -4, -5, -5,
-5, -5, -7, -7, -7, -8, -7, -8, -7,
-8, -8, -7, -8, -8, -8, -8, -7, -6,
-6, -6, -7, -6, -6, -5, -5, -3, -2,
-2, -1, 0, -1, 0, 1, 2, 2, 3,
3, 3, 6, 7, 7, 7, 8, 9, 8,
10, 10, 9, 10, 11, 9, 10, 12, 11,
10, 9, 9, 9, 9, 10, 9, 6, 6,
5, 5, 6, 3, 1, 1, 0, 1, 0,
0, 1, -1, -2, -2, -1, -3, -3, -2,
-4, -4, -3, -2, -4, -4, -4, -5, -3,
-3, -5, -3, -3, -5, -4, -2, -2, -3,
-3, -1, 0, -1, 0, 0, 0, -2, -1,
0, -1, -2, -2, -2, -2, -1, -3, -2,
-3, -4, -3, -3, -3, -3, -3, -3, -3,
-2, -4, -6, -5, -3, -2, -4, -3, -2,
-4, -4, -4, -3, -4, -5, -4, -5, -3,
-2, -5, -2, -4, -4, -3, -2, -1, -1,
-1, 0, 2, 2, 1, 1, 3, 3, 3,
3, 4, 4, 5, 6, 5, 5, 6, 7,
7, 7, 8, 8, 7, 9, 9, 9, 9,
10, 9, 9, 9, 9, 9, 9, 8, 7,
9, 9, 6, 7, 5, 2, 3, 2, 1,
1, 0, -2, -2, -2, -3, -3, -2, -2,
-4, -5, -4, -4, -4, -4, -5, -4, -4,
-5, -4, -5, -4, -5, -6, -4, -4, -5,
-5, -5, -5, -6, -4, -4, -4, -3, -2,
-3, -3, -2, -2, -1, -2, -3, -1, 0,
-1, 0, 0, 0, 0, 1, 0, 0, 0,
0, -1, 1, 1, 1, 0, -2, -2, -3,
-3, -4, -4, -6, -7, -5, -4, -5, -5,
-4, -6, -8, -7, -6, -5, -5, -5, -4,
-4, -5, -4, -3, -3, 0, 0, -2, -1,
0, 0, 1, 1, 2, 2, 2, 2, 2,
4, 5, 5, 5, 6, 7, 7, 9, 10,
10, 10, 12, 12, 13, 14, 14, 14, 15,
15, 15, 15, 15, 15, 14, 15, 15, 12,
13, 13, 12, 10, 11, 11, 11, 10, 8,
6, 5, 7, 6, 6, 4, 3, 4, 5,
3, 2, 2, 1, 1, 2, 3, 1, 0,
0, 1, 0, -2, -1, -2, -3, -3, -3,
-3, -4, -6, -8, -9, -9, -10, -12, -14,
-15, -18, -21, -21, -21, -21, -22, -24, -26,
-26, -27, -27, -28, -26, -25, -26, -28, -27,
-24, -23, -23, -24, -21, -17, -17, -15, -12,
-12, -12, -12, -9, -7, -6, -5, -3, -3,
-2, 0, 0, 1, 3, 7, 6, 4, 6,
7, 8, 11, 10, 10, 13, 15, 14, 13,
18, 20, 18, 19, 21, 23, 24, 23, 22,
24, 26, 26, 26, 27, 25, 23, 25, 27,
28, 28, 28, 23, 19, 23, 24, 20, 20,
21, 15, 13, 15, 16, 14, 11, 8, 7,
8, 11, 11, 6, 4, 8, 7, 6, 7,
6, 4, 7, 13, 12, 7, 8, 8, 4,
1, 1, 1, 2, -4, -12, -18, -24, -25,
-25, -32, -41, -55, -59, -61, -75, -87, -96,
-109, -122, -133, -141, -148, -157, -168, -180, -191,
-198, -202, -207, -206, -207, -211, -211, -208, -203,
-189, -171, -153, -132, -114, -96, -75, -54, -30,
-5, 19, 43, 61, 77, 93, 106, 123, 143,
161, 182, 198, 202, 201, 209, 229, 242, 240,
235, 239, 249, 258, 255, 242, 233, 245, 268,
278, 256, 223, 223, 253, 263, 235, 198, 178,
188, 215, 230, 200, 143, 113, 128, 158, 158,
128, 99, 90, 82, 70, 56, 32, 7, 14,
46, 36, -23, -71, -76, -54, -36, -39, -74,
-118, -134, -122, -101, -104, -129, -164, -174, -129,
-86, -109, -184, -219, -191, -147, -141, -183, -249,
-290, -269, -236, -266, -346, -394, -366, -325, -353,
-431, -472, -406, -313, -316, -398, -449, -401, -287,
-194, -164, -193, -245, -212, -55, 75, 67, 26,
67, 165, 237, 269, 293, 319, 333, 368, 414,
432, 463, 488, 448, 404, 391, 377, 361, 365,
376, 308, 197, 150, 129, 73, 53, 91, 43,
-107, -165, -54, 1, -148, -312, -273, -125, -62,
-128, -258, -294, -141, 70, 57, -217, -378, -145,
198, 289, 169, -47, -219, -101, 264, 458, 217,
-163, -199, 13, 121, 101, -51, -293, -319, -62,
24, -274, -474, -296, -170, -336, -422, -285, -248,
-302, -130, 98, -11, -257, -146, 184, 278, 264,
331, 192, -35, 235, 805, 830, 315, 82, 322,
503, 522, 619, 557, 242, 163, 399, 507, 489,
618, 602, 156, -164, 112, 476, 406, 94, -154,
-242, -132, 56, 5, -325, -566, -527, -478, -624,
-692, -561, -551, -744, -836, -671, -520, -626, -736,
-647, -581, -639, -687, -702, -739, -665, -383, -236,
-414, -513, -321, -114, -43, 32, 65, -98, -236,
34, 608, 924, 680, 218, 56, 329, 847, 1214,
1006, 341, 11, 340, 667, 553, 353, 355, 415,
416, 364, 257, 108, 6, 113, 293, 233, 46,
4, 25, -10, -12, 55, 40, -65, -56, -26,
-101, -61, 143, 229, 78, -161, -210, 103, 424,
377, 86, -274, -491, -328, -37, 60, 128, 188,
-105, -625, -823, -464, 138, 389, 111, -343, -526,
-306, 13, 205, 250, -35, -554, -764, -498, -42,
167, -210, -639, -448, -101, -110, -171, -74, -39,
47, 424, 616, 324, 98, 367, 853, 942, 416,
-184, -130, 339, 472, 369, 239, -165, -418, 101,
742, 659, 325, 365, 476, 233, -14, 270, 785,
719, -29, -533, -220, 237, 305, 179, -190, -644,
-610, -380, -526, -601, -237, 48, -36, -124, -49,
-6, 23, 117, 55, -199, -428, -512, -338, -238,
-424, -323, -135, -464, -657, -189, 100, -379, -964,
-893, -346, -64, -322, -650, -480, 32, 238, 201,
386, 616, 611, 400, 195, 357, 842, 1051, 832,
712, 829, 1070, 1307, 1081, 551, 363, 544, 623,
239, -374, -609, -230, 375, 486, -52, -446, -270,
181, 645, 601, -135, -654, -256, 567, 840, 380,
-54, 18, 334, 386, 21, -214, 83, 243, -316,
-937, -1074, -1006, -896, -674, -424, -331, -354, -380,
-481, -392, 80, 358, 171, -170, -624, -796, -130,
706, 803, 381, 152, 367, 620, 685, 655, 347,
36, 180, 417, 412, 358, 288, 189, 150, 16,
-240, -428, -428, -266, -335, -819, -1150, -946, -587,
-437, -580, -961, -1218, -1065, -704, -431, -350, -315,
-214, -162, -81, 26, -8, -52, -117, -226, -40,
285, 241, -2, -69, 57, 207, 81, -144, -69,
65, 84, 49, -168, -248, 126, 502, 472, 192,
120, 442, 667, 551, 512, 634, 814, 1014, 1098,
1156, 1112, 974, 1144, 1330, 1099, 825, 847, 877,
555, 2, -243, -102, -196, -471, -377, -235, -439,
-622, -547, -470, -495, -431, -197, -21, 21, -9,
-246, -438, -238, -31, 0, 96, 137, -25, -211,
-181, -149, -350, -368, -33, 21, -308, -323, 32,
379, 605, 531, 85, -374, -367, 9, 277, 147,
-356, -698, -494, -140, -126, -354, -549, -673, -642,
-428, -269, -273, -246, -216, -349, -323, -16, 32,
-387, -742, -662, -434, -223, 41, 140, -58, -227,
-80, 93, 20, -166, -360, -536, -555, -305, -33,
-23, -86, -75, -9, 82, -1, -156, 24, 532,
916, 956, 835, 901, 1127, 1279, 1417, 1435, 1144,
822, 862, 1214, 1352, 1001, 611, 539, 532, 369,
189, 170, 308, 465, 430, 232, 64, 14, 51,
-37, -244, -321, -276, -144, 57, 77, -215, -467,
-335, -186, -245, -133, -81, -588, -1130, -959, -520,
-631, -1122, -1270, -971, -873, -1118, -1157, -1078, -1296,
-1365, -1010, -873, -1138, -1061, -379, 89, 51, 177,
372, 185, -14, 63, 197, 125, -123, -60, 243,
195, 88, 201, 115, -63, -12, -79, -492, -751,
-489, 49, 163, -293, -424, -52, 229, 302, 212,
217, 315, 70, -207, -210, -173, 129, 619, 556,
213, 181, 170, 112, 167, 322, 451, 206, -136,
58, 426, 526, 524, 394, 387, 568, 481, 297,
164, 8, 263, 664, 777, 943, 989, 934, 1283,
1495, 1153, 861, 738, 582, 614, 692, 655, 629,
432, 127, -119, -338, -313, -138, -204, -561, -994,
-1168, -948, -700, -658, -788, -1053, -1027, -684, -566,
-528, -355, -335, -323, -28, 206, 87, 56, 387,
585, 296, 24, 261, 492, 248, -132, -469, -674,
-502, -235, -255, -517, -847, -1038, -965, -707, -630,
-767, -639, -298, -193, -290, -310, -118, 74, -77,
-337, -324, -120, 187, 323, -72, -552, -454, -14,
29, -427, -803, -735, -586, -762, -918, -783, -649,
-723, -857, -786, -626, -591, -417, -83, 167, 262,
49, -161, 157, 842, 1298, 1356, 1206, 1041, 1194,
1461, 1323, 1070, 1221, 1687, 2051, 2002, 1673, 1464,
1550, 1851, 1907, 1531, 1327, 1399, 1342, 1287, 1264,
1152, 1030, 878, 716, 601, 454, 264, 264, 352,
151, -193, -296, -161, -93, -215, -423, -617, -668,
-547, -416, -464, -807, -1175, -1174, -1045, -1076, -1023,
-829, -710, -745, -1069, -1443, -1417, -1099, -939, -1165,
-1307, -1056, -843, -638, -304, -190, -334, -578, -770,
-705, -675, -947, -957, -565, -437, -617, -843, -1015,
-813, -489, -584, -904, -1054, -797, -229, -26, -208,
-66, 398, 710, 644, 390, 413, 726, 992, 1204,
1337, 1234, 1104, 1038, 1001, 1043, 982, 847, 885,
1024, 1098, 1138, 1108, 1038, 966, 885, 882, 878,
929, 1005, 944, 1008, 1284, 1415, 1289, 1007, 760,
812, 947, 806, 455, 111, -72, -290, -611, -626,
-559, -765, -1034, -1375, -1632, -1565, -1588, -1728, -1585,
-1477, -1547, -1533, -1371, -1103, -995, -1090, -1102, -947,
-686, -403, -295, -250, -107, -86, -171, -150, 12,
234, 283, 185, 300, 461, 393, 382, 434, 378,
306, 202, 195, 253, -8, -307, -105, 264, 342,
212, 34, -57, 78, 435, 571, 180, -165, -51,
339, 705, 683, 464, 658, 958, 825, 579, 465,
390, 241, 61, 202, 429, 128, -122, 241, 406,
39, -167, -60, 15, -31, -68, 146, 402, 344,
227, 208, 87, -25, -31, -66, -169, -249, -87,
75, -181, -438, -249, 49, 87, -40, -16, 53,
-86, -74, 98, 78, 110, 169, -84, -323, -251,
-102, -172, -513, -750, -675, -568, -587, -583, -523,
-450, -302, -245, -356, -480, -590, -495, -183, -105,
-191, -215, -308, -206, 39, 4, -77, -21, 74,
186, 218, 356, 611, 489, 83, 13, 246, 371,
348, 240, 61, -66, -107, -170, -205, -74, 200,
277, 45, -11, 180, 263, 100, -74, 102, 246,
6, -154, -162, -197, -128, -189, -227, -49, -238,
-490, -333, -188, 1, 215, 150, 144, 128, -33,
187, 532, 676, 911, 773, 283, 351, 673, 620,
349, 105, 205, 425, 325, 295, 372, 340, 511,
628, 394, 224, 187, 91, -174, -556, -482, -37,
-9, -226, -382, -568, -466, -208, -241, -426, -656,
-814, -788, -902, -1065, -946, -860, -896, -831, -744,
-672, -685, -743, -723, -783, -813, -570, -341, -239,
-57, 137, 348, 576, 593, 454, 429, 503, 449,
238, 173, 350, 423, 419, 530, 501, 272, 156,
207, 295, 404, 568, 676, 419, 30, 113, 463,
550, 473, 349, 126, 33, 144, 207, 193, 267,
304, 81, -252, -401, -368, -347, -404, -452, -408,
-272, -40, 234, 281, 48, -72, -18, 54, 208,
309, 285, 245, 164, 38, -20, 148, 430, 563,
655, 679, 453, 300, 319, 219, 25, -15, 54,
-117, -444, -431, -135, -147, -468, -667, -722, -593,
-301, -217, -428, -642, -598, -400, -422, -602, -628,
-554, -509, -501, -541, -488, -250, -129, -284, -441,
-358, -161, -82, 4, 134, 157, 290, 516, 582,
702, 859, 871, 858, 759, 615, 616, 754, 839,
725, 464, 259, 187, 127, 150, 280, 238, 92,
78, 5, -86, 6, 67, -14, -92, -143, -211,
-89, 213, 300, 107, -91, -154, -153, -238, -355,
-314, -227, -168, -92, -142, -219, -156, -47, 53,
-15, -195, -161, -186, -382, -395, -297, -238, -240,
-390, -502, -336, -97, -29, -116, -290, -289, -67,
74, 112, 119, 182, 358, 382, 315, 341, 290,
218, 190, 101, -51, -168, -132, -41, -39, -15,
104, 186, 151, 68, 89, 154, 67, 10, 143,
120, -185, -382, -365, -263, -145, -111, -159, -190,
-53, 151, 177, 179, 384, 553, 502, 490, 572,
600, 573, 442, 119, -212, -260, -166, -318, -506,
-413, -279, -285, -354, -390, -278, -142, -85, -18,
-19, -121, -143, -32, 88, 118, 42, -96, -187,
-167, -113, -172, -270, -256, -178, -192, -249, -128,
103, 132, -47, -147, -104, -56, -9, 45, 35,
109, 315, 381, 326, 336, 457, 667, 786, 675,
489, 460, 569, 595, 470, 303, 272, 448, 620,
545, 226, -92, -128, 91, 172, -98, -385, -378,
-264, -284, -362, -314, -148, -72, -198, -350, -353,
-344, -389, -353, -292, -327, -413, -473, -519, -588,
-577, -546, -737, -989, -1030, -997, -1010, -861, -683,
-731, -690, -419, -197, -47, 112, 167, 74, 41,
176, 309, 438, 671, 781, 793, 868, 904, 991,
1099, 987, 812, 816, 869, 766, 605, 633, 728,
592, 424, 460, 405, 170, 75, 30, -105, -58,
63, -58, -242, -359, -415, -255, -44, -127, -266,
-191, -187, -296, -273, -260, -341, -345, -324, -384,
-467, -421, -233, -125, -227, -341, -256, -168, -217,
-249, -302, -447, -425, -274, -289, -299, -229, -275,
-272, -103, -57, -117, -106, -162, -256, -184, -31,
51, 69, 31, -19, 72, 256, 318, 331, 254,
28, -7, 121, 48, -64, 58, 183, 152, 161,
201, 167, 190, 287, 278, 157, 56, 103, 332,
460, 299, 166, 238, 308, 374, 508, 509, 373,
275, 270, 298, 229, 185, 192, 23, -160, -80,
67, 31, -170, -378, -384, -330, -500, -648, -615,
-686, -716, -510, -510, -771, -752, -475, -434, -556,
-480, -403, -515, -464, -255, -177, -105, 29, 95,
152, 210, 190, 180, 279, 408, 325, 225, 462,
607, 537, 759, 1022, 973, 945, 964, 846, 818,
952, 907, 584, 313, 302, 428, 533, 479, 260,
178, 262, 185, 18, -77, -263, -370, -208, -240,
-589, -739, -572, -444, -405, -357, -475, -738, -771,
-542, -441, -529, -651, -803, -823, -556, -285, -227,
-233, -202, -168, -110, -78, -220, -302, -56, 129,
-60, -149, 54, 130, 169, 324, 231, 24, 89,
269, 320, 262, 231, 225, 138, 67, 153, 310,
399, 269, -21, -197, -183, -59, 144, 234, -13,
-274, -168, 32, -37, -277, -417, -441, -416, -324,
-312, -467, -540, -373, -166, -161, -297, -365, -341,
-246, -69, 81, 99, -3, 11, 305, 540, 449,
394, 586, 667, 606, 685, 665, 425, 410, 585,
509, 360, 424, 538, 583, 482, 250, 159, 310,
423, 217, -131, -280, -204, -51, -12, -204, -338,
-232, -143, -201, -306, -374, -336, -229, -257, -453,
-576, -497, -379, -326, -302, -372, -504, -453, -229,
-133, -226, -328, -326, -261, -151, -6, 97, 143,
164, 143, 138, 267, 433, 500, 470, 297, 143,
279, 504, 556, 475, 333, 233, 225, 228, 198,
128, 24, -17, 4, -55, -187, -251, -213, -119,
-94, -214, -357, -349, -246, -195, -183, -261, -440,
-533, -476, -341, -213, -170, -220, -299, -220, -8,
51, -11, 19, 172, 292, 189, 9, -6, 102,
238, 384, 477, 448, 353, 304, 354, 473, 543,
400, 229, 275, 380, 425, 415, 371, 398, 460,
377, 202, 154, 199, 110, -123, -365, -524, -524,
-360, -134, -47, -182, -348, -453, -542, -503, -376,
-398, -521, -595, -621, -560, -439, -284, -115, -80,
-123, -57, 28, -15, -60, -9, 47, 119, 203,
288, 435, 571, 635, 706, 750, 627, 436, 345,
330, 398, 460, 368, 213, 127, 140, 215, 202,
58, -99, -244, -387, -470, -527, -637, -754, -791,
-768, -742, -739, -735, -704, -649, -552, -479, -491,
-494, -454, -433, -422, -398, -315, -115, 75, 175,
244, 307, 360, 398, 460, 532, 529, 446, 422,
497, 541, 504, 541, 702, 803, 744, 645, 621,
727, 877, 873, 734, 593, 513, 523, 516, 412,
336, 334, 274, 199, 163, 123, 125, 117, 107,
140, 72, -73, -114, -68, -15, 13, -122, -338,
-367, -325, -386, -497, -608, -634, -546, -477, -427,
-377, -412, -464, -436, -343, -276, -327, -390, -313,
-149, -17, 2, -93, -146, -104, -76, -87, -131,
-224, -280, -194, -46, 12, -76, -189, -151, 18,
160, 200, 99, -81, -149, -95, -31, -6, -45,
-93, -97, -71, 0, 73, 34, -82, -129, -102,
-84, -96, -107, -69, -5, 6, 18, 48, 35,
27, 32, -4, -71, -30, 119, 205, 266, 352,
325, 237, 282, 352, 358, 342, 265, 203, 200,
159, 120, 159, 195, 185, 133, 37, 20, 152,
312, 363, 316, 255, 251, 259, 211, 160, 86,
-4, -30, -79, -154, -213, -271, -243, -146, -147,
-211, -283, -319, -219, -157, -207, -237, -252, -245,
-136, 0, 42, -22, -108, -82, 34, 130, 179,
152, 98, 105, 110, 116, 180, 175, 66, -9,
-9, 36, 82, 75, 12, -39, -14, 23, 1,
12, 31, -61, -155, -184, -158, -86, -60, -67,
-63, -84, -100, -81, -115, -171, -157, -150, -179,
-191, -209, -245, -217, -128, -54, -42, -73, -100,
-88, -10, 104, 199, 249, 227, 201, 204, 151,
83, 75, 87, 84, 67, 34, 18, 44, 110,
218, 275, 232, 190, 209, 263, 294, 256, 174,
108, 37, -54, -110, -129, -179, -293, -360, -339,
-282, -190, -135, -188, -239, -234, -227, -182, -127,
-89, -51, -73, -136, -151, -85, 0, 72, 129,
122, 65, 44, 103, 202, 272, 252, 170, 148,
167, 152, 130, 127, 79, 14, 70, 157, 142,
109, 70, -25, -57, -6, 46, 98, 135, 135,
82, 16, 10, 68, 87, -20, -120, -116, -98,
-102, -129, -204, -271, -282, -252, -216, -215, -221,
-156, -70, -66, -120, -156, -146, -126, -84, -15,
-21, -76, -8, 131, 146, 86, 42, 12, 44,
110, 169, 171, 91, 68, 173, 262, 248, 160,
36, -90, -109, -24, -12, -57, -64, -78, -89,
-75, -87, -101, -82, -72, -76, -81, -63, -34,
-4, 61, 87, 46, 23, -1, -8, 40, 63,
46, 45, 39, 14, -11, -25, -16, 36, 78,
85, 110, 120, 132, 189, 228, 217, 154, 89,
57, 14, -14, -6, 0, 13, 8, -50, -68,
-60, -107, -140, -126, -122, -151, -147, -118, -105,
-85, -83, -100, -139, -195, -194, -168, -183, -173,
-148, -166, -168, -123, -59, -11, 20, 64, 98,
80, 58, 83, 111, 143, 176, 171, 152, 146,
165, 174, 143, 93, 30, 5, 21, 42, 35,
-37, -94, -61, -12, -5, -27, -58, -85, -81,
-11, 79, 65, -14, -17, 15, -4, -2, 39,
20, -29, -19, 3, -11, -39, -62, -43, -34,
-60, -77, -119, -163, -128, -5, 87, 73, 51,
116, 189, 217, 240, 234, 177, 192, 295, 344,
313, 263, 236, 240, 230, 179, 99, 19, -25,
-16, -9, -35, -66, -53, -16, -40, -70, -81,
-102, -86, -87, -156, -225, -228, -145, -52, -22,
-57, -171, -255, -247, -208, -165, -187, -242, -275,
-261, -168, -75, -13, 8, -62, -125, -136, -133,
-81, -11, -17, -80, -115, -103, -27, 71, 134,
137, 44, -48, -24, 69, 156, 194, 175, 112,
55, 54, 101, 148, 157, 142, 100, 44, 27,
63, 106, 107, 89, 67, 37, 17, 30, 63,
69, 61, 21, -37, -55, -72, -53, -26, -53,
-77, -87, -109, -119, -80, -36, -29, -38, -48,
-57, -65, -16, 52, 83, 83, 24, -27, -14,
9, 27, 52, 50, 45, 90, 132, 117, 75,
16, -1, 60, 95, 55, 25, 26, 20, 61,
119, 89, 1, -61, -68, -46, -36, -40, -39,
-49, -58, -16, 30, 13, -12, 18, 35, 6,
3, 30, 22, 25, 52, 32, 12, 9, -5,
-16, -25, -33, -38, -44, -76, -118, -118, -96,
-54, -3, 9, -31, -82, -84, -35, 18, 25,
-26, -72, -48, 8, 25, 8, -20, -66, -105,
-102, -80, -73, -79, -80, -70, -59, -55, -82,
-113, -85, -51, -59, -57, -38, -13, -7, -18,
-6, 20, 51, 55, 18, -8, -7, 24, 78,
119, 137, 135, 139, 153, 144, 155, 179, 166,
128, 56, 8, 38, 85, 94, 72, 20, -32,
-9, 25, 17, -15, -84, -123, -106, -82, -62,
-60, -43, -4, -12, -45, -68, -108, -100, -47,
-49, -64, -50, -9, 37, 59, 68, 62, 53,
49, 25, 13, 32, 40, 60, 109, 82, 18,
10, -1, 21, 102, 111, 40, -10, -9, 20,
31, 0, -51, -108, -135, -89, -21, 1, -54,
-125, -129, -113, -144, -205, -227, -167, -118, -114,
-100, -71, 5, 34, -51, -119, -120, -72, 10,
56, 51, 58, 65, 98, 135, 84, 20, -3,
-1, 57, 135, 137, 90, 88, 107, 102, 45,
-4, 9, 48, 95, 99, 65, 42, 44, 78,
80, 29, 11, 39, 27, 0, 7, 19, 10,
-45, -99, -86, -77, -74, -57, -74, -84, -92,
-134, -114, -65, -73, -76, -96, -105, -50, -31,
-17, 17, 9, 18, 62, 75, 55, 63, 76,
61, 61, 80, 103, 107, 110, 131, 134, 120,
94, 66, 70, 78, 59, 52, 57, 53, 72,
76, 31, -18, -53, -57, -35, -17, -9, -27,
-34, -7, -17, -26, -13, -60, -86, -53, -42,
-36, -36, -46, -13, 19, -16, -47, -15, 11,
-9, -18, -26, -24, 14, 8, -53, -54, 15,
43, 15, -9, -5, 5, -12, -40, -57, -74,
-94, -105, -91, -20, 30, -10, -50, -58, -52,
-42, -47, -54, -61, -83, -64, -30, -3, 31,
9, -35, -43, -31, 6, 50, 54, 55, 67,
53, 43, 30, 27, 62, 37, -26, -52, -54,
-29, 3, -12, -23, 11, 26, 23, 31, 57,
66, 46, 32, 35, 83, 124, 111, 124, 157,
143, 101, 80, 60, 27, 11, 21, 22, 9,
-4, -26, -41, -35, -50, -103, -138, -116, -90,
-89, -90, -79, -74, -58, -18, -12, -29, -36,
-17, 22, 30, -1, -8, 8, 10, 19, 31,
36, 38, 41, 28, -7, -14, -6, -20, -30,
-11, -2, -9, 0, 25, 56, 78, 68, 40,
34, 47, 50, 40, 37, 26, 28, 53, 61,
57, 25, -35, -75, -65, -48, -65, -81, -67,
-53, -41, 3, 19, -3, -9, -2, -1, -24,
-36, -23, -26, -29, -9, 0, -15, -17, -9,
12, 50, 45, 14, 19, 37, 24, 9, 16,
13, -16, -19, 3, -3, -12, -10, -23, -43,
-47, -38, -46, -44, -7, 3, -19, -13, -26,
-52, -29, -19, -32, 0, 11, -26, -24, -20,
-41, -30, -24, -53, -67, -26, 23, 20, 9,
6, -8, 3, 16, 7, 3, -5, 2, 33,
53, 72, 94, 86, 69, 96, 118, 95, 91,
78, 32, 26, 48, 48, 37, 21, 7, -6,
-8, 8, 1, -17, -2, 18, 1, -28, -51,
-84, -93, -74, -46, -18, -19, -31, -10, 10,
10, 7, -5, -30, -39, -28, -9, 10, 17,
11, 14, 20, -1, 2, 18, 7, 15, 40,
40, 32, 27, 23, 31, 43, 33, 7, -3,
18, 51, 53, 31, 21, 14, 16, 14, 4,
11, 16, 1, -24, -38, -33, -27, -50, -74,
-70, -60, -54, -44, -22, -22, -43, -33, -16,
-35, -36, -18, -27, -42, -46, -36, -17, -15,
-22, -21, -20, -2, 15, 12, 22, 27, 22,
41, 57, 60, 63, 54, 56, 65, 62, 68,
58, 34, 53, 70, 58, 60, 51, 33, 41,
39, 16, -3, -16, -18, -15, -18, -32, -76,
-85, -62, -82, -87, -68, -84, -75, -40, -48,
-55, -45, -42, -24, -14, -1, 27, 23, -1,
-2, 12, 15, 32, 55, 52, 55, 82, 81,
58, 62, 59, 37, 24, 20, 17, 18, 19,
15, 14, 5, -18, -27, -20, -19, -34, -39,
-29, -30, -27, -27, -48, -52, -54, -77, -48,
-18, -36, -34, -13, -21, -38, -28, -15, -7,
-6, -20, -18, 2, 4, -11, -5, 7, 1,
1, 12, -2, -17, 7, 15, 2, 15, 34,
48, 78, 94, 82, 66, 66, 64, 47, 44,
57, 64, 74, 65, 34, 26, 31, 32, 33,
18, 5, -1, -18, -22, -31, -54, -37, -32,
-74, -89, -77, -73, -65, -72, -75, -39, -21,
-31, -31, -24, -19, -8, -4, 7, 26, 22,
15, 13, 11, 28, 47, 42, 35, 28, 5,
18, 55, 55, 45, 44, 18, 9, 18, -2,
-5, 6, -15, -16, -12, -20, -4, 4, -15,
-18, -10, -5, -2, -16, -24, -14, -7, -14,
-33, -33, -20, -17, -17, -18, -30, -37, -35,
-34, -13, -3, -28, -28, -10, -21, -17, -4,
-12, -16, -20, -27, -16, -8, -4, 14, 24,
11, 17, 30, 27, 14, 7, 28, 30, 22,
45, 47, 23, 31, 23, -5, 10, 17, -5,
2, 15, 9, 20, 29, 11, -9, -8, 8,
10, -1, -14, -30, -30, -8, -9, -20, -17,
-17, -12, 1, 6, -7, -18, -6, 10, -6,
-7, 29, 35, 21, 16, 9, 25, 44, 26,
21, 34, 28, 40, 41, 9, -2, 1, 12,
34, 18, -12, -10, -16, -29, -24, -25, -20,
-17, -35, -29, -12, -29, -39, -32, -30, -17,
-12, -28, -20, -5, -4, 7, 14, 10, 3,
-3, 0, 19, 27, 4, -21, -18, -7, -4,
0, 1, -6, -17, -30, -24, -11, -9, 0,
-1, 0, -3, -12, 1, 15, -2, 3, 16,
-3, -8, 7, 3, 13, 32, 23, 10, -6,
-11, 8, 4, -12, -9, 3, 12, -2, -31,
-36, -33, -37, -17, -5, -20, -14, 4, 5,
4, 6, 17, 31, 27, 23, 16, -1, -4,
15, 24, 21, 18, 7, -7, -14, 18, 41,
25, 14, 13, 2, 5, 12, 8, 15, 10,
2, 13, 10, 3, 5, -1, 0, 11, 10,
6, 2, 7, 10, -4, -3, 2, -13, -4,
14, -4, -17, -11, -4, 8, 3, -8, -1,
-7, -20, -4, 23, 23, 8, 5, 24, 21,
-5, -2, 7, -9, -15, -8, -6, 6, 2,
-26, -19, 1, -19, -31, -27, -34, -41, -47,
-39, -12, -12, -29, -32, -41, -36, -26, -36,
-35, -33, -29, -1, 5, -13, -21, -21, -3,
12, 1, -7, -1, 2, 12, 9, -1, 15,
21, 18, 25, 4, -13, 5, 12, 16, 33,
33, 19, 21, 26, 30, 30, 24, 23, 19,
22, 34, 39, 28, 15, 14, 24, 24, 18,
12, 10, 4, 8, 28, 29, 2, -7, 6,
8, 10, 2, -13, -8, -2, 0, 12, 13,
-1, 3, 21, 26, 24, 17, 11, 15, 19,
19, 19, 11, 1, 3, 3, 0, -5, -11,
-16, -26, -18, 3, -5, -17, 2, 10, 6,
6, -8, -11, 4, -3, -17, -10, -17, -37,
-31, -17, -26, -37, -42, -53, -49, -34, -40,
-39, -21, -17, -23, -23, -25, -30, -24, -13,
-10, -10, 1, 1, -7, 7, 19, 11, 4,
-3, -8, 1, 6, 7, 25, 22, -5, 3,
20, 7, -1, 14, 17, 18, 20, 12, 25,
41, 23, 19, 37, 39, 21, 17, 23, 17,
6, 9, 15, 4, -15, -8, 8, 7, 1,
-12, -18, -14, -15, -10, 0, -3, 3, 13,
-8, -21, -8, -26, -29, -1, -9, -24, -19,
-22, -24, -18, -25, -27, -28, -34, -26, -9,
-14, -14, -8, -8, -5, 4, 4, -10, -12,
-7, -8, -10, -15, -19, -10, -5, -9, -9,
-19, -33, -27, -14, -15, -14, -16, -25, -10,
5, -7, -11, 2, 3, 7, 17, 28, 33,
32, 33, 39, 49, 57, 63, 62, 64, 67,
59, 55, 67, 71, 58, 53, 53, 44, 38,
44, 51, 51, 45, 35, 34, 46, 55, 48,
36, 21, 3, -5, 2, 7, 0, -17, -30,
-34, -48, -62, -64, -66, -66, -62, -79, -90,
-85, -88, -88, -85, -88, -103, -112, -112, -102,
-99, -102, -103, -110, -100, -80, -60, -57, -68,
-59, -45, -35, -6, 9, -3, 2, 32, 45,
48, 51, 40, 51, 78, 85, 83, 87, 94,
101, 104, 105, 100, 86, 82, 96, 102, 96,
85, 68, 63, 65, 55, 50, 46, 28, 32,
43, 33, 30, 27, 8, 18, 36, 27, 20,
13, -14, -19, 8, 12, 0, -1, -12, -24,
-20, -27, -39, -39, -39, -44, -38, -32, -42,
-38, -33, -43, -55, -57, -60, -61, -56, -57,
-55, -43, -46, -58, -55, -50, -50, -51, -48,
-46, -44, -36, -26, -20, -13, -11, -8, 1,
5, 0, 8, 21, 31, 42, 39, 43, 56,
48, 37, 45, 45, 47, 52, 46, 40, 26,
18, 28, 30, 22, 14, 0, -3, 8, 0,
-7, 0, -10, -13, -9, -13, -13, -18, -33,
-32, -26, -37, -41, -32, -26, -30, -34, -31,
-38, -40, -24, -25, -29, -15, -18, -23, -4,
2, -7, 0, 5, 10, 22, 23, 25, 31,
33, 37, 38, 39, 43, 46, 41, 44, 46,
37, 35, 46, 63, 67, 52, 38, 30, 35,
41, 41, 41, 29, 15, 16, 4, -4, 3,
-12, -18, -13, -27, -39, -47, -55, -44, -43,
-53, -45, -36, -37, -37, -38, -40, -49, -57,
-41, -24, -28, -31, -26, -20, -15, -21, -23,
-18, -19, -14, -10, -11, 1, -6, -26, -14,
-1, -7, -10, -11, -9, 0, -4, -9, 3,
8, 0, -2, 1, 16, 20, 7, 9, 10,
8, 18, 12, 11, 17, -6, -19, 0, 0,
-10, -6, -12, -14, -11, -9, -2, -10, -19,
-9, -11, -4, 18, 7, -3, 9, 17, 23,
28, 25, 19, 19, 24, 33, 37, 30, 28,
35, 44, 43, 33, 31, 30, 26, 33, 39,
35, 31, 27, 19, 23, 24, 19, 13, 0,
0, 2, -7, -9, -10, -13, -6, -6, -23,
-28, -15, -9, -20, -34, -30, -15, -12, -11,
-3, -4, -4, 6, 15, 9, -11, -20, 3,
26, 23, 1, -16, -3, 12, 2, -22, -36,
-35, -28, -20, -13, -19, -38, -43, -29, -11,
-5, -15, -37, -40, -9, 12, -1, -23, -30,
-16, 12, 21, -1, -25, -21, 4, 34, 55,
34, -12, -11, 47, 99, 107, 58, 0, 8,
78, 148, 151, 56, -40, -2, 142, 215, 99,
-67, -64, 76, 153, 99, -21, -107, -92, -1,
106, 107, -123, -395, -334, 60, 274, -69, -597,
-626, -126, 238, 18, -447, -577, -312, -34, 20,
-89, -242, -332, -222, 74, 262, 64, -285, -232,
259, 563, 294, -138, -130, 312, 642, 515, 189,
57, 187, 415, 538, 467, 277, 109, 134, 334,
441, 299, 59, -7, 128, 228, 146, -20, -99,
-34, 60, 24, -108, -188, -147, -57, -48, -142,
-224, -210, -144, -122, -175, -212, -176, -150, -199,
-256, -210, -100, -79, -195, -298, -248, -107, -48,
-110, -192, -224, -189, -112, -40, -31, -124, -238,
-193, -3, 87, -53, -221, -165, 48, 132, -2,
-150, -109, 61, 147, 83, -20, -60, -13, 85,
157, 130, 17, -68, -10, 147, 217, 116, -20,
-21, 103, 200, 158, 52, 35, 105, 155, 132,
81, 74, 110, 114, 74, 48, 68, 100, 77,
27, 30, 48, 19, -15, 7, 63, 53, -56,
-123, -41, 81, 75, -61, -154, -84, 45, 68,
-24, -105, -76, 22, 53, -13, -63, -21, 54,
59, -1, -34, 16, 80, 81, 48, 37, 61,
89, 88, 101, 134, 132, 100, 83, 125, 188,
173, 101, 95, 172, 214, 149, 68, 94, 181,
177, 103, 83, 132, 165, 122, 83, 140, 191,
153, 92, 106, 198, 226, 138, 85, 146, 215,
187, 110, 77, 115, 146, 115, 91, 96, 78,
27, -3, 42, 102, 71, -23, -46, 30, 95,
63, -18, -25, 77, 174, 138, 13, -25, 96,
218, 181, 34, -70, -45, 17, 2, -67, -174,
-346, -516, -553, -446, -455, -789, -1213, -1308, -1046,
-878, -1179, -1691, -1839, -1528, -1219, -1292, -1623, -1772,
-1538, -1147, -921, -951, -1038, -929, -549, -95, 155,
127, 97, 387, 931, 1339, 1380, 1234, 1276, 1661,
2102, 2223, 2027, 1848, 1942, 2198, 2295, 2119, 1856,
1725, 1745, 1752, 1601, 1335, 1102, 993, 952, 830,
570, 286, 139, 133, 85, -135, -436, -638, -645,
-571, -620, -835, -1064, -1151, -1069, -951, -964, -1109,
-1209, -1162, -1044, -961, -944, -977, -1001, -912, -687,
-517, -623, -887, -897, -469, 10, -35, -590, -934,
-545, 184, 427, -53, -619, -563, 40, 489, 339,
-128, -306, -6, 403, 497, 232, -55, 0, 388,
704, 584, 145, -76, 260, 816, 942, 485, 2,
65, 575, 923, 744, 290, 76, 276, 596, 662,
419, 134, 92, 280, 434, 344, 88, -66, 8,
151, 126, -81, -239, -176, -29, -74, -351, -574,
-487, -208, -132, -426, -780, -797, -577, -595, -978,
-1169, -667, -36, -548, -2285, -3281, -1756, 927, 1236,
-1911, -5006, -4073, -66, 2017, -295, -3701, -3797, -892,
975, -165, -1978, -1636, 374, 1482, 679, -567, -591,
706, 2337, 3224, 2743, 1269, 287, 1221, 3597, 5083,
4106, 1858, 972, 2334, 4096, 4167, 2806, 1916, 2383,
3045, 2508, 1220, 820, 1784, 2669, 1981, 204, -876,
-470, 510, 803, 170, -787, -1568, -1893, -1598, -1027,
-992, -1803, -2610, -2484, -1905, -2113, -3113, -3399, -2267,
-1261, -2007, -3637, -3909, -2340, -893, -1158, -2272, -2486,
-1639, -915, -777, -596, -91, 196, 85, 210, 875,
1373, 1247, 1219, 1958, 2718, 2328, 1196, 1008, 2350,
3677, 3269, 1503, 366, 922, 2264, 2810, 1996, 608,
-168, 75, 680, 811, 395, -56, -318, -607, -966,
-1108, -925, -613, -368, -369, -919, -1926, -2460, -1685,
-300, 155, -611, -1524, -2204, -3227, -3859, -2037, 1622,
2382, -2583, -8448, -7544, -84, 4814, 915, -6423, -7558,
-1746, 2515, -59, -4587, -3858, 1260, 3625, 187, -4148,
-3500, 1542, 5467, 4780, 1256, -1127, -403, 2481, 5332,
6346, 5014, 2536, 1216, 2467, 5039, 6238, 5070, 3381,
3269, 4173, 3905, 2248, 1586, 3299, 5240, 4362, 1004,
-1382, -489, 2113, 3168, 1620, -742, -1824, -1435, -897,
-1058, -1500, -1545, -1398, -1965, -3266, -4136, -3756, -2609,
-1804, -1986, -3087, -4599, -5296, -4051, -1731, -781, -2228,
-4092, -3977, -2325, -1353, -1568, -1490, -428, 178, -672,
-1650, -1058, 749, 2039, 2079, 1540, 897, 310, 572,
2266, 4265, 4265, 1869, -231, 559, 3332, 4752, 3229,
768, 101, 1364, 2463, 1984, 819, 411, 723, 675,
-162, -923, -743, -32, 185, -516, -1653, -2359, -2103,
-986, 42, -205, -1702, -2870, -2337, -809, -221, -982,
-1544, -946, -598, -2117, -4291, -4100, -857, 1948, 338,
-4799, -7972, -5403, 173, 2371, -1063, -5533, -5578, -1777,
605, -985, -3249, -2213, 1184, 2691, 560, -2356, -2288,
1233, 5244, 6441, 4004, 370, -663, 2555, 7404, 9282,
6573, 2612, 1836, 4662, 7467, 7393, 5421, 4262, 4741,
5362, 4705, 3163, 2397, 3337, 4887, 4810, 2254, -749,
-1316, 772, 2706, 2016, -573, -2552, -2746, -2012, -1647,
-1978, -2579, -3105, -3473, -3911, -4484, -4891, -4795, -4163,
-3543, -3538, -4275, -5356, -5743, -4637, -2614, -1301, -1825,
-3341, -4011, -2937, -751, 1007, 1245, 235, -639, -61,
1626, 2864, 2967, 2734, 3013, 3329, 2914, 2312, 2666,
3839, 4308, 3162, 1453, 768, 1255, 1887, 2006, 1715,
1031, -297, -1660, -1690, -277, 813, -30, -2137, -3370,
-2854, -1553, -593, -413, -1146, -2567, -3440, -2369, -205,
379, -1258, -2315, -812, 262, -3205, -8576, -7894, 738,
7492, 1951, -11595, -17098, -6934, 7139, 8065, -4575, -14199,
-8946, 3606, 7504, -547, -8242, -5113, 4406, 8113, 2134,
-5040, -4089, 4157, 10934, 10158, 4167, -565, -192, 4428,
9765, 12201, 9861, 4512, 1225, 3451, 8483, 10133, 6497,
2574, 3333, 6806, 6986, 2487, -1214, 623, 5416, 6647,
2204, -3289, -4556, -1565, 1544, 1525, -1236, -4293, -5695,
-5174, -3995, -3403, -3449, -3750, -4505, -6014, -7296, -6523,
-3849, -2096, -3288, -5722, -6004, -3581, -1497, -1960, -3330,
-2800, -434, 964, -111, -1739, -1136, 1736, 4151, 3736,
1274, -451, 469, 3386, 5833, 5898, 3646, 1085, 272,
1743, 4061, 5108, 3837, 1490, 246, 967, 1866, 859,
-1069, -974, 1542, 2835, 47, -4285, -5068, -1567, 1781,
1223, -1997, -4227, -3747, -1720, 41, 245, -1228, -2972,
-2673, 22, 1980, -930, -7721, -11271, -5725, 4974, 8484,
-2007, -16979, -19255, -4670, 11057, 9690, -6417, -17537, -10841,
4262, 9292, 206, -9128, -6224, 4828, 10018, 3699, -5183,
-5121, 4702, 14279, 14466, 5778, -2633, -2185, 7036, 16118,
16305, 8081, 390, 499, 6580, 11150, 10036, 5704, 2902,
3378, 4664, 3786, 863, -796, 1216, 4609, 4493, -338,
-5670, -6486, -2751, 884, 571, -3095, -6446, -6997, -5770,
-5041, -5016, -4216, -2579, -2468, -5088, -8129, -7964, -4228,
-323, 497, -1556, -3653, -3615, -1718, 464, 1808, 2386,
2832, 3085, 2905, 2676, 3473, 5501, 7094, 6442, 3929,
1663, 1436, 3254, 5807, 7100, 5044, -34, -4091, -2992,
2149, 5333, 2562, -3067, -5877, -4480, -2080, -1793, -3026,
-3838, -3735, -3663, -4472, -5756, -5753, -3576, -640, -274,
-3965, -7787, -6757, -717, 4380, 3595, -1553, -5936, -8603,
-10223, -8952, -922, 9700, 9355, -7788, -25795, -22413, 2268,
20887, 12133, -11291, -20129, -5899, 10236, 8585, -3645, -6300,
4667, 14216, 9346, -3593, -8558, 715, 15085, 21179, 14887,
3733, -2703, -675, 7170, 15131, 18360, 13959, 4205, -2825,
-656, 7594, 11845, 7182, 319, -439, 3255, 3213, -3299,
-8972, -6318, 2300, 7190, 2254, -9247, -17334, -15064, -4452,
5160, 5127, -4268, -14501, -17256, -11145, -1830, 3786, 2984,
-2498, -8101, -9587, -5703, 622, 4570, 4035, 1442, 729,
2493, 3534, 2433, 2239, 5944, 11438, 12371, 6496, -211,
-156, 7092, 13566, 11979, 3928, -2545, -2226, 2713, 6150,
5117, 1270, -1851, -2859, -2376, -1909, -2364, -3401, -4183,
-3897, -2875, -3205, -5503, -7822, -7501, -3934, -942, -1572,
-4262, -5939, -4671, -2353, -1387, -1159, -1270, -1328, -606,
474, 1044, -2647, -11603, -17081, -10374, 5922, 14849, 2056,
-22033, -31238, -14612, 11094, 17910, 1778, -15538, -15417, -2045,
6690, 2855, -2559, 473, 8823, 11423, 3782, -4649, -2775,
9111, 20847, 21610, 11572, 962, -1465, 5731, 15559, 20008,
16950, 9230, 2204, 114, 3088, 8130, 10523, 7643, 2045,
-2107, -2945, -2538, -3593, -5210, -4403, -857, 1328, -2497,
-11667, -18881, -16866, -6286, 3400, 2835, -7811, -18322, -19279,
-10025, 1525, 6930, 3766, -4647, -11401, -9904, -322, 10100,
12428, 5874, -274, 926, 6762, 9360, 6778, 5904, 10509,
15077, 12681, 3846, -1653, 2460, 11036, 14737, 8967, -1021,
-6168, -3899, 2328, 6041, 3404, -2878, -7672, -6869, -1918,
801, -2188, -7419, -8083, -2687, 1898, -692, -8121, -11198,
-5642, 2830, 5915, 1120, -5666, -8314, -5770, 118, 4614,
4713, 1482, -2544, -3331, -3779, -8931, -13840, -10273, 3355,
13432, 2906, -20058, -30890, -17080, 7759, 16047, 2886, -12525,
-15117, -5998, 1614, 2294, 2684, 4610, 6236, 5486, 2514,
1346, 1962, 4564, 11022, 17438, 18182, 10179, -796, -3019,
5456, 15942, 18468, 11176, 2796, -143, 1670, 3922, 3836,
3337, 3330, 1623, -2609, -7177, -7654, -4250, -2210, -3491,
-5312, -4380, -3103, -6738, -13209, -14278, -6529, 3346, 4931,
-2861, -11176, -12097, -5552, 2679, 7102, 6050, 1301, -3350,
-3378, 1785, 7413, 9059, 7013, 5043, 5331, 5197, 3143,
1862, 3790, 8037, 10159, 7236, 1450, -3393, -3980, 598,
6251, 7410, 1502, -7144, -10260, -5116, 2386, 4197, -894,
-6255, -6026, -1493, 873, -1639, -4426, -2720, 2252, 4206,
158, -4631, -4466, 537, 4709, 4528, 1691, -828, -1394,
-455, 756, 2662, 3101, 1730, -3579, -12987, -18531, -12998,
1944, 11963, 1503, -19826, -29919, -18138, 2254, 7644, -1829,
-9260, -6516, 134, -793, -5234, -2336, 6264, 12828, 11829,
6589, 3429, 2592, 4795, 11433, 19490, 21681, 13136, 379,
-4138, 3585, 14812, 17633, 10124, 623, -2287, 696, 2273,
-926, -5000, -4391, -386, 139, -4657, -11003, -13946, -11930,
-7460, -1932, 1277, -2311, -10543, -16920, -14512, -4039, 4987,
7518, 3175, -4213, -7535, -4747, 3590, 12231, 13419, 8429,
2377, 1080, 5563, 8497, 7304, 5331, 5656, 8235, 6997,
998, -3131, -1857, 3017, 5883, 3744, -408, -4503, -6489,
-4796, -374, 3254, 1651, -2830, -5206, -3690, -681, -969,
-2819, -2616, 19, 3379, 2359, -2476, -6413, -6111, -463,
4664, 4106, -565, -4801, -4960, -1242, 2479, 3706, 2168,
-1104, -3048, -1563, 1217, 2013, -5714, -17921, -21743, -10839,
7751, 13091, -4648, -26509, -29653, -9872, 10100, 9523, -4335,
-12121, -5509, 4923, 6380, 1839, -508, 3312, 10704, 14545,
12317, 5508, -243, 2421, 11485, 19096, 18306, 8626, -1357,
-5542, -1695, 7815, 13549, 10229, -23, -8373, -7496, -2775,
-1016, -2900, -4868, -4103, -4535, -6851, -8099, -8137, -6414,
-4023, -1790, -45, -1513, -4791, -6160, -4105, 1060, 5970,
7099, 3934, -996, -2213, 1973, 6975, 7927, 4726, 2474,
3951, 5221, 2642, -2359, -3579, 1362, 6614, 6282, 116,
-5643, -5733, -1884, 2107, 3418, 2566, 684, -2319, -3803,
-2133, 1512, 2943, 475, -1004, 753, 3095, 1652, -3074,
-4562, -932, 3815, 4486, -22, -4199, -4666, -2201, 284,
316, -914, -2297, -2441, -1538, -435, 909, 626, -1222,
-1534, -429, 1711, 2386, -1786, -10676, -18200, -16272, -3805,
9505, 8238, -9397, -24577, -22256, -4907, 8659, 5940, -3701,
-6764, 40, 6190, 4239, 208, 238, 7081, 14458, 15143,
10726, 3479, -706, 1700, 9131, 17577, 17708, 7959, -5009,
-11508, -5347, 5635, 10789, 6499, -3121, -9303, -9814, -6625,
-3333, -3193, -4349, -5615, -6188, -5123, -4441, -4550, -4074,
-2769, -61, 2441, 2881, 1395, -578, -341, 2509, 6034,
8202, 6377, 2696, 1272, 2589, 4787, 4611, 2378, 2124,
3911, 4872, 2049, -3374, -5770, -2705, 3179, 5905, 2589,
-2792, -5419, -3176, 1056, 2875, 2483, 1205, 605, 856,
1012, 892, 105, -411, 707, 2924, 4184, 1755, -2553,
-4857, -3556, 401, 2466, 945, -2315, -5556, -5549, -2241,
534, 601, -1774, -3034, -1962, -886, -448, -720, -467,
864, 760, -22, -2546, -10211, -17121, -15877, -4803, 7993,
7254, -6563, -18374, -17755, -6143, 3291, 4322, 1822, 416,
2788, 5190, 4256, 2627, 2590, 6398, 12709, 15757, 12829,
5542, -667, 167, 7241, 14346, 14826, 6392, -3516, -7434,
-4607, 1054, 2988, 847, -1549, -2641, -3046, -5363, -8256,
-9130, -6906, -1460, 2260, 1568, -2911, -8580, -9418, -3675,
5021, 10127, 7909, 1478, -4015, -3331, 2450, 7291, 7632,
2567, -2022, -899, 3418, 5544, 1349, -4117, -3409, 1758,
6000, 3526, -3975, -7331, -3931, 2747, 7037, 4962, -21,
-2902, -2008, 1306, 4461, 6364, 5956, 3623, 1734, 793,
44, -893, -1041, 1633, 5264, 4870, -943, -7404, -8611,
-4974, -1192, 185, -1334, -3672, -4910, -5132, -4387, -3532,
-3233, -2430, -469, 1245, 892, -969, -2441, -2140, 320,
4999, 5954, -4638, -20056, -24424, -8954, 13558, 16089, -3145,
-20665, -19447, -4802, 4488, 3733, 943, 683, 3109, 6219,
9247, 7736, 782, -1410, 8024, 20877, 20174, 4723, -7148,
-2758, 11240, 17896, 11462, 414, -6134, -4913, 113, 2818,
98, -5900, -8369, -4446, 924, 1657, -3389, -10569, -13223,
-7690, 2339, 7741, 1634, -9014, -10982, -1172, 9642, 9098,
1310, -2795, -1040, 2790, 3808, 3559, 3064, -527, -3160,
-1391, 3120, 5224, -144, -6714, -6416, -719, 5630, 7253,
2735, -2973, -4325, 679, 7146, 8220, 4055, -42, 814,
5288, 7658, 6592, 3051, -746, -541, 3401, 6030, 1953,
-6340, -8619, -2689, 4076, 3217, -4875, -9612, -7826, -4293,
-2441, -4080, -5740, -5529, -3656, -506, -1035, -5787, -9518,
-7034, 2323, 9287, 6495, -1853, -6110, -3281, -1708, -8958,
-19544, -18870, -2771, 13029, 10762, -7491, -21837, -18923, -4183,
8733, 12580, 9779, 4597, 738, 1460, 6302, 9711, 8375,
8143, 12512, 15808, 11272, 389, -5554, 161, 11080, 15851,
10426, 692, -6372, -6808, -2525, 652, 827, -219, -349,
-622, -3328, -7883, -11020, -8961, -3240, 1884, 4155, 1995,
-3530, -7816, -6444, -218, 6086, 9279, 7901, 3113, -2352,
-5757, -3836, 2022, 4572, 894, -3519, -3311, -534, -618,
-3716, -5515, -3290, 1495, 4374, 4455, 2961, -645, -3247,
-656, 5273, 9838, 9751, 5755, 1863, 158, 1457, 4585,
6390, 5379, 2894, 2284, 1867, -2279, -7051, -6578, 70,
4745, 1660, -4524, -8007, -7088, -5690, -5467, -4178, -2679,
-2218, -3422, -4167, -4313, -6105, -6633, -4202, 864, 5119,
4084, -163, -5331, -8699, -8710, -7313, -4649, -2471, -1419,
-1136, -3199, -6428, -8048, -4902, 1089, 4681, 5723, 5535,
5146, 4006, 2052, 2314, 5274, 8680, 9907, 8776, 6722,
2548, -2403, -3303, 1224, 7406, 9468, 5089, -1197, -4384,
-3570, -298, 1776, 2005, 2041, 1326, 971, -180, -2334,
-1170, 1913, 4281, 4732, 2874, 1174, -1341, -3384, -2503,
368, 4031, 3270, -986, -3519, -5360, -6004, -5576, -3603,
208, 708, -2137, -4940, -5349, -3588, -2796, -1399, 1017,
3144, 4196, 2483, 828, 338, 919, 3842, 6202, 7189,
7499, 6330, 4847, 3252, 2136, 3698, 5845, 5566, 3019,
267, -55, -1091, -4220, -5041, -3430, -280, 171, -4649,
-8723, -9280, -5975, -3192, -3974, -3912, -4053, -3748, -3570,
-5871, -5499, -3552, -1691, 320, 341, 748, -313, -3436,
-4687, -3681, 21, 2550, 643, -2123, -3254, -2226, -1044,
-1617, -1510, 183, 1250, 726, -1662, -3388, -1759, 933,
3817, 5242, 3025, 248, -1339, -514, 2022, 3410, 3970,
3324, 2632, 2603, 2240, 2166, 1271, 487, 1076, 2039,
3296, 3836, 3610, 2913, 2718, 4213, 5555, 6023, 4769,
2442, 2067, 2173, 1623, 1201, 348, 52, -124, -1528,
-2834, -3604, -3463, -2357, -2564, -3775, -3801, -1929, -465,
-2109, -3743, -2657, 200, 2580, 954, -1304, -95, 1549,
2303, 1795, 1633, 3356, 3699, 2361, 792, 1148, 4045,
4820, 3851, 3197, 2449, 2704, 1722, -652, -1154, -393,
113, -1010, -3328, -4342, -3939, -3345, -3697, -5115, -5610,
-4202, -3639, -5088, -5351, -3216, -862, -414, -1839, -3996,
-4831, -2467, 147, 1055, 1288, -247, -2225, -2233, -1562,
-1278, -936, -961, -935, -367, -323, -459, -1940, -3974,
-2262, -13, 2, -401, -1825, -2308, -1124, 448, 2154,
2434, 1300, -812, -1337, 1325, 3374, 3466, 2500, 2156,
3439, 3549, 2068, 1392, 1986, 3025, 3944, 3898, 3259,
4467, 6347, 5356, 2893, 1690, 2072, 4136, 5313, 2776,
-236, -1063, -794, 524, 802, -1377, -2879, -2167, -1439,
-1595, -1539, -1666, -2495, -2375, -1253, -515, -187, -1409,
-2847, -511, 2411, 1761, 492, -18, 607, 2350, 3288,
3505, 2741, 1099, 699, 2017, 3214, 3333, 1567, 33,
1260, 1925, 808, -377, -2558, -3781, -1677, 164, -580,
-1727, -2619, -3421, -3586, -3957, -4562, -3646, -2285, -3437,
-5293, -4792, -4128, -4012, -2920, -2249, -2439, -3737, -5607,
-4427, -1259, 71, 609, 555, -1039, -3354, -5388, -3760,
415, 2513, 2513, 819, -1436, -2780, -2740, -501, 2727,
3936, 1491, -965, -766, -484, -223, 361, 695, 1771,
1130, -1839, -1764, 797, -31, -2549, -1790, 2108, 4043,
887, -154, 2411, 2605, 2012, 1977, 3923, 6630, 4176,
107, -311, 1731, 1910, 1011, 3119, 3219, 998, -1282,
-2832, -1645, -685, 945, 2574, 2543, -267, -5015, -3819,
-342, 1228, 2055, -619, -1233, 2069, 2896, 1095, 62,
1365, 3366, 4584, 4956, 3323, -19, -50, 4024, 5222,
3695, 3118, 1933, 1256, 1443, 128, -119, 2043, 2477,
1823, 1324, 30, -1363, -3023, -3074, -188, 621, -1775,
-2806, -2961, -2753, -4359, -5350, -1220, -116, -4157, -4811,
-2793, -1040, -1957, -2862, -1901, -3192, -3720, -2357, -1727,
-387, -2131, -5011, -3650, -454, 596, -1298, -3716, -3122,
496, 136, -2415, -1675, -811, -837, 140, -1243, -187,
-1431, -5320, -2121, 100, -467, 2465, 681, -2093, 1224,
1632, 1428, 1776, 648, 2480, 3622, 876, 259, 1403,
2139, 3117, 497, -763, -170, 279, 1769, 342, -871,
-25, -1549, -2290, 290, 1042, -796, -4291, -3895, 159,
1264, -540, -2328, -702, 1972, 852, -2274, -798, 1126,
-579, -480, 3481, 3833, 1004, 901, 1536, 1809, 3103,
2521, 3183, 5220, 1800, -266, 4663, 4230, -790, 159,
2274, 5114, 4304, -1998, 344, 4921, -343, -2048, 1180,
2112, 3109, -10, -1818, 552, -1360, -2889, -1302, -1918,
-37, 1406, -1762, -3054, -1446, -2073, -4292, -3214, 1163,
2333, -712, -2583, -2058, -1034, -600, -3796, -2395, 2137,
-1122, -1927, 702, -2196, -4374, -3257, -1558, -256, -728,
-395, -176, -1529, -2772, -1121, -340, -1147, -250, -4079,
-473, 4241, -2818, -3523, 3255, 2355, -2550, -1082, 1197,
2213, -94, -237, 3123, 1314, -1075, 977, 1081, 2045,
2966, -1328, -1069, -741, -524, -380, -2766, -986, 926,
-3281, -1554, 2554, -3620, -6394, -1680, -321, 2889, 243,
-1567, 2276, -1294, -525, 2010, -4883, -1495, 6778, 2085,
-873, 2496, 418, -1156, -1179, 1604, 6173, 1190, -2381,
5788, 2431, -4941, -242, 1248, 1023, 4426, 3399, 2726,
1388, -922, 595, 392, 1414, 6260, 2673, -973, 2237,
1776, -2393, -757, 4158, 2842, -2327, 505, 1230, -3623,
-917, 336, -1400, -1018, 1771, 2696, -570, -2435, 886,
2309, -2865, -1328, 2077, -1967, -3486, -411, 961, -1661,
-1979, 1179, -493, -2597, 1995, 284, -3300, -2213, 184,
312, -1665, -641, -1325, -1276, 90, 69, 476, -778,
-1099, 853, 1515, 1630, 1188, -877, -1751, 702, 2983,
-201, 664, 4018, -352, -1864, 875, 2367, 813, -2463,
-702, 886, -2204, -2216, 399, -1729, -2408, 1412, -2757,
-3530, 449, -2554, -3910, 906, 697, -1696, 566, -1360,
-1991, 81, -1756, -159, 1180, -667, -584, -359, 183,
1943, -412, -1747, 1659, 1961, 280, 294, 222, 2000,
2076, 829, -43, -880, 3353, 3615, 1279, 1746, -1031,
1301, 3477, -777, 2567, 1215, -2344, 3556, 561, -2166,
1119, 2377, -391, -1825, -2359, 49, 1764, 391, -291,
325, 1223, 1443, -624, -2828, 1381, 2438, 28, -652,
-166, 581, -2039, -374, -20, -2459, -1149, 1505, 2008,
-1798, -3848, -1796, -2208, -2224, -878, 728, -154, -534,
1061, 538, -1465, 73, 1147, 82, -119, 3800, 4797,
-873, 784, 1458, -148, 3180, 1319, 908, 4951, 584,
-57, 2394, -967, 586, 405, -1601, 3566, -285, -3949,
-1301, -1953, -1223, -1831, -3477, -779, -389, -3169, -1828,
-1496, -1451, -556, -3327, -209, 534, -4908, 131, -386,
-5232, 1373, 2129, -1740, -1957, -1102, 76, 396, -1426,
-179, 1357, -3276, -1420, 3819, -44, 56, 2777, -1202,
1908, 1410, 2031, 3495, -2197, -163, 1565, 239, 2803,
480, -1636, 1180, 616, 1206, 1166, -1579, 1572, 814,
-774, 2310, 740, -2606, 1234, -603, -362, 1562, -2134,
652, -777, -2353, 5464, 377, -2490, 1012, 157, 680,
-1389, -1898, 1135, -1, -1730, 1800, -1466, -1687, -1469,
-3250, -1081, 1381, -81, -204, -26, 353, 1941, 174,
104, 2009, 1032, -871, 3280, 3398, -651, -154, 3309,
1964, 448, 812, -17, 887, 2405, 3295, -54, -2396,
1410, 1380, -1156, 296, -1706, -1729, 401, -970, -878,
-723, -2285, 1259, 1320, -1960, -1039, -211, -661, -763,
-1599, -43, 308, -1841, 72, -2075, -3010, -497, 506,
-377, 247, 1932, -1788, -2419, 257, 208, -2176, 488,
2827, -1720, -1649, -619, 520, 1103, -1231, -1327, 2162,
1535, -383, 315, -1488, -235, 1761, -27, -232, 515,
127, -2239, 654, 2871, -379, -1274, 2445, 874, -2444,
514, -206, -1289, 1314, 1869, 1316, 1878, -1454, -982,
476, 359, 2084, -708, 405, -246, -1071, 1757, -866,
-2331, 783, 501, -853, 896, 36, -2468, -1138, 1445,
-613, -687, 1999, -449, -731, 1478, 384, -45, 96,
1530, 1919, 186, -94, 1347, -329, -348, 1631, 574,
1062, 735, -1652, 675, 244, 1241, 1137, -2469, 621,
45, -612, 1308, -2015, -208, 2392, -1646, -67, 77,
-1558, 113, 1263, -236, -971, -333, -733, -555, 2024,
-135, -3817, -398, 1696, -1179, -1473, 1175, -166, 618,
1132, -2504, -575, 146, -688, 1323, 150, -2021, 15,
1673, 347, -1535, -106, 235, -32, 1167, -471, -503,
-1260, 416, -13, -1082, 1036, -790, -1676, 487, 985,
77, 57, -1175, 1146, 2023, -1706, -404, 3249, -739,
-979, 3044, -514, -168, 2201, -2863, 1009, 1833, -2309,
1565, 476, -1698, 1667, -496, -2193, 1686, 532, 336,
-1095, -1655, 578, -909, -1263, 2569, -2833, -1808, 2860,
-822, 27, 1098, -1371, 1585, -284, -1074, 2944, -764,
-2871, 2484, 1179, -1213, -670, -1226, 1112, 1837, -299,
-388, -51, 1, 992, -723, -361, 1723, -1115, -2012,
1261, -9, -127, -510, -1550, 1448, 957, -1930, 171,
776, -2104, 14, 764, -599, -745, -438, -371, -659,
1075, 282, -3116, 684, 3747, 22, -2139, 816, 1413,
-333, 458, 906, 483, -1084, 797, 1039, -467, -377,
1386, -1182, 610, 1787, -1354, -2800, 2638, 424, -2372,
1153, -51, -689, 290, -2199, 818, 3755, -2674, -1689,
3497, -507, -1978, 1729, 1413, 215, -76, 53, 759,
371, -1529, 1005, -770, -685, 1754, -908, -653, 1047,
-1066, -784, -199, -526, 86, -1750, -916, 1839, 580,
-1884, 319, 226, -977, 212, 202, -741, -1013, 2057,
69, -2961, 974, 1964, -512, -224, 1554, -79, -1142,
1853, -71, 1009, 1174, -718, 2040, -158, -1508, 1042,
0, -1219, 1212, 448, -208, -47, -779, -867, 1924,
-254, -1085, -221, -1283, 1543, -584, -951, 225, -1089,
-464, -853, -615, 1576, -2313, -1214, 950, -2548, -314,
1201, -1527, 952, 764, -1915, 528, 169, -1676, 1742,
425, -2346, 932, 290, 109, 492, -379, 932, 70,
582, 135, 769, 1665, -1751, 576, 1013, 366, 2339,
71, 637, 1500, 576, 111, 494, 765, 1170, 1421,
-5, -892, 2054, -640, 160, 1426, -651, 348, -841,
-558, 1563, 277, -408, -1468, 482, -1538, -2255, 968,
-1307, -454, 1306, -3085, -1680, 2624, -2191, -1719, 1891,
-3826, -1441, 2736, -3694, -266, 1897, -4468, 841, 2828,
-4060, -318, 2305, -1662, 528, 3056, -2429, -156, 2045,
-753, 475, 419, -597, 1100, 1845, 504, 1067, -402,
-824, 1807, 1192, 459, 200, 1728, 50, -497, 678,
-355, 938, 1239, -1223, 360, 1251, -95, 981, 1029,
-1940, 260, 1627, -2387, 3426, 519, -3141, 1822, -506,
-1471, 1101, -2137, 1069, 885, -2618, 1673, -463, -1558,
1439, -386, -1923, 1538, -1313, -1735, 540, -1433, -915,
494, -839, -1527, -1143, 480, -1081, 27, 1732, -1285,
-1833, 1952, -667, -1626, 1819, -1293, -1323, 2139, -376,
-1392, 1277, -1172, -240, 2907, -1875, -238, 2573, -1068,
-471, 2065, -686, -1315, 2575, 233, -1005, 1135, 706,
534, 278, -182, 1091, -21, -222, 1413, -371, -54,
1108, -103, 382, -70, 787, 894, -108, 1308, 1113,
-1412, 574, 1140, -2032, 500, 569, -1251, 951, -50,
-1398, 772, -474, -1536, 1297, 251, -2321, 109, -703,
-425, 40, -1354, -773, -225, -1743, -1839, 1244, 261,
-3082, -424, 1162, -937, 123, -322, -407, -561, -331,
1369, -1142, -1050, 1024, 1116, -213, -752, 1521, -383,
-415, 1011, 947, -713, 743, 1945, -237, 881, 600,
-757, 885, -835, 756, 2454, -1985, 699, 1572, -1652,
673, 232, -42, 1975, -736, -270, 1660, -704, -96,
1264, -428, 278, 774, -954, -1325, 756, 1275, -594,
-353, 204, -1130, -782, -432, -979, 268, 378, 20,
-870, 405, -357, -1661, 637, 473, 293, -314, -895,
3, -175, -1016, -643, 204, -588, -1007, -131, 401,
-849, -476, 271, 320, -198, 533, -25, -1994, 1421,
525, -1611, 1261, 507, -488, 1093, 361, -1814, 2230,
312, -196, 3242, -803, -962, 1714, -1479, 1426, 1612,
-1953, 1376, -581, -669, 1370, -1251, 426, 1274, -470,
1757, 807, -589, 1275, 126, -871, 1025, -1331, 287,
1258, -1813, 146, -839, -1471, 828, -402, -281, 1704,
-1341, -231, 939, -1035, -472, -197, -764, -380, -816,
-266, 382, -497, -1708, -591, 1119, -1941, 178, 969,
-1656, 685, 1004, -1114, -127, -1473, -678, 1610, -1253,
277, 1807, -1642, -461, 2033, -1449, 392, 98, -157,
1525, -860, 2455, 413, -2159, 2457, 475, -374, 1532,
-981, 843, 973, 324, 1168, 225, -407, 1487, 681,
-680, 1098, 117, 245, 1238, -223, 1076, -428, -466,
2593, -663, -1225, 1303, -933, -561, 1190, -1071, -1229,
406, -284, -13, 198, -1494, -637, 352, -1960, 420,
49, -1472, -761, -234, -2213, -1750, -521, -1554, -813,
662, -633, -1388, -15, -947, -391, -152, -894, 631,
-461, -885, 633, -51, -1063, 218, 1149, -61, -274,
988, -140, 7, 1774, 1558, -623, 755, 1352, -511,
1106, 744, 17, 2640, -91, 697, 1547, -1757, 1832,
1859, -206, 1505, 575, -444, 556, 250, 1786, 792,
-125, -266, 407, 501, 798, -536, -1214, 58, 6,
354, -685, 613, 99, -2022, -116, -236, -182, 263,
-824, -1187, -142, -138, -1228, -1008, 786, -1421, -1127,
-269, -2278, 841, 222, -2423, 678, -1153, -2082, 574,
-570, -729, 180, -777, 212, 270, -274, 1077, -493,
118, 804, -1260, 349, 799, 545, 481, 971, 1099,
1146, -273, 34, 1728, 1128, 411, 758, 308, -808,
950, 1490, 209, -265, 1154, -11, -460, 2644, -122,
-728, 2033, -1100, -305, 1774, -208, -1567, -57, -140,
-670, -454, -1390, -80, 978, -438, -731, -684, 344,
-458, -199, -126, -1663, -883, 642, -1517, -1144, -375,
-422, -452, -1815, -791, 763, -1502, -205, 684, -1641,
448, 1399, -2160, 804, 1088, -2214, 1030, 1585, -1093,
-11, 1718, -360, -81, 1294, 398, 218, 1225, 644,
505, 2090, -385, 526, 2111, -303, -316, 1550, 1323,
-459, 881, 1874, -1256, 1429, 2485, -1003, -552, 14,
432, 952, 471, -633, 408, -358, 140, 554, -1260,
-404, 245, -2572, 954, 1005, -1621, -82, -175, -957,
112, 106, -1117, -819, -62, -785, 71, 93, -1296,
-1680, 242, -956, -2696, 302, -204, -1404, 254, -558,
-201, -630, 16, -436, -1647, 1649, -1096, -1267, 2273,
-1270, 20, 1749, -2509, 780, 942, -1859, 2762, 304,
-300, 2617, -947, 861, 2601, -1153, 754, 1629, -681,
686, 1443, -235, 1900, 5, -565, 1559, 285, -170,
757, 480, 547, 752, -427, 50, 839, -95, -791,
-1698, -291, -62, -1730, 524, 1008, -2176, -369, 165,
-749, -972, -287, 889, -1218, -1712, 833, -855, -995,
-14, -793, -1815, 605, -607, -1890, 769, -781, 230,
1155, -2000, 876, 1835, -1617, 9, 1058, -1232, 859,
1486, -1301, 1595, 501, -951, 2935, -921, -634, 2826,
-793, 655, 2660, -232, 235, 1879, 481, -51, 804,
987, -360, -331, 2099, -302, -149, 1966, -1233, -12,
1330, -2265, 1256, -116, -1394, 2937, -995, -1572, 2964,
-2257, -2587, 1820, -2132, -1609, 778, -1596, -486, 560,
-1749, 274, -706, -1714, 1304, -360, -2657, 1833, -750,
-1729, 433, -1461, -794, -1545, -892, 385, -891, -374,
1261, -589, 235, 815, -773, -669, 636, -471, 136,
871, -392, 782, 677, -472, 1130, 1029, -1262, 1070,
2171, 575, 675, 600, 2104, 1077, -182, 2621, -604,
-30, 3302, -1331, 599, 742, 291, 1329, -551, 1043,
1729, -1754, 1220, 1113, -2174, 1281, 743, -2027, 851,
-205, -1576, 214, -1629, -605, -394, -1508, -254, -63,
-489, -847, -26, -997, -1065, -120, -376, -1283, -1393,
83, -212, -1610, 419, -1120, -590, 395, -1210, -21,
-273, -622, 899, -196, -1059, 1130, 616, -529, -166,
794, 22, -216, 862, 664, -390, 980, 228, 789,
182, 402, 2149, -1133, 799, 2637, -799, 176, 1306,
905, -93, 677, 338, 121, 483, 297, 339, 347,
249, 731, 40, 66, 112, -889, -128, 582, -1191,
-67, -1364, -233, 488, -1734, -634, 1517, -1657, -1015,
594, -1422, 1396, -1357, -1617, 1254, -1596, -941, 789,
-1860, -77, 245, -327, 569, -723, 104, 905, -543,
-918, 1387, -42, -440, 619, 68, 45, 1364, -880,
19, 1491, -561, 1174, 1403, -1411, 1351, 1222, -612,
864, 877, -658, 382, 864, -552, 1286, 309, -105,
1083, -170, -289, 1049, -248, -537, 625, -48, 337,
-385, 532, -315, -1398, 588, -628, -1192, 649, -806,
-170, 541, -2267, 1052, 274, -1970, 833, 253, -1345,
-290, -120, -959, -94, -189, -1397, -136, -155, -654,
207, -706, 617, 415, -1962, 1169, 670, -1132, 319,
297, -589, 100, 510, -620, 610, -153, -15, 1327,
-99, 229, 281, 169, 1015, -106, 1197, 577, -698,
577, 931, -964, 1605, 505, -1713, 2369, 115, -1585,
1839, 664, -1411, 867, 620, 329, 491, -1119, 420,
266, -1708, 499, -69, -1037, 795, -321, -959, 32,
235, -1748, 295, -249, -230, 485, -1185, -97, 489,
-2036, 711, 405, -2800, 593, 434, -1038, 536, 347,
-570, 705, -806, -290, 818, -999, 53, 1585, -756,
-657, 1180, 115, -364, 217, -226, 1033, 347, -20,
611, 658, 590, -128, -451, 1676, -660, -21, 805,
-880, 1481, 412, -1534, 1522, 221, -132, 662, -407,
613, 1132, -551, -187, 1184, -577, -444, 953, -1034,
-472, 461, -865, -99, 637, -572, 300, 450, -591,
137, 404, -972, 306, -524, -1167, 433, 124, -1326,
-368, -305, -917, 452, -626, -695, 656, 258, -1401,
270, 446, -1045, 636, -357, -1072, 913, 512, -1732,
489, 952, -747, 58, 673, -453, 1125, -488, 46,
1723, -1244, 417, 1803, -1215, 623, 659, -560, 676,
-9, 92, 701, 1100, -623, 142, 283, -512, 547,
576, -525, -155, 1143, -1286, -329, 1959, -1302, -459,
1188, -1199, 1020, -118, -1303, 956, -905, -647, 595,
-356, -1354, -74, 750, -791, -335, 56, -862, -36,
276, -279, 46, -485, -181, 196, -584, -238, 259,
-314, -77, 383, 509, -386, -180, 859, -542, 955,
372, -362, 1458, 113, -106, 1495, -534, 63, 1295,
-505, 846, 983, -1097, 1764, 320, -185, 1061, -525,
115, 217, -328, 326, 312, 374, 179, -683, 485,
-1286, 147, -583, -979, 888, -504, -1235, 715, -1050,
-1111, 848, -828, -1043, -115, -327, 22, -451, -1008,
98, -262, -545, -363, -48, -257, -731, 878, 96,
-1186, 426, 359, -1101, 1074, -267, 521, -375, -166,
1398, -994, 780, 550, 124, -298, 581, 236, 305,
-111, 396, 741, -10, 662, 155, 271, 563, 65,
-318, 812, -483, 843, 75, -714, 1152, -26, -190,
-97, 533, -111, -564, 724, -24, -820, 835, -473,
-632, 154, -104, -932, 919, -606, -619, 496, -310,
-271, -360, 120, -630, 126, 65, -931, 548, -207,
-455, 410, -282, -931, 944, -354, 69, 412, -661,
1068, -969, -443, 1894, -1281, -442, 2003, -1640, 713,
852, -1344, 1338, -457, 243, 498, -697, -129, 993,
-388, -76, 1039, -768, 492, -104, -58, 951, -854,
181, 1093, -1111, 491, 544, -1061, 118, 586, -477,
-411, 392, 233, 91, -908, 532, 218, -1176, 670,
-74, -674, 696, -801, 194, 592, -1790, 762, -564,
-791, 595, -145, -727, 228, 434, -246, -232, -169,
281, -324, 289, -120, -270, -49, 282, 250, -56,
-405, 507, 27, -1060, 1329, -203, -204, 1677, -767,
-313, 1272, -968, 717, 183, -1652, 2157, -75, -1906,
2590, -428, -1614, 2564, -1511, -240, 1421, -1911, 1420,
396, -1397, 1691, -694, -1500, 1942, -823, -784, 841,
-635, 759, -447, 351, 44, -946, 227, 441, -564,
155, -719, 182, 509, -320, -300, 205, -662, 726,
469, -1240, 191, 664, -269, -152, -18, 214, -149,
-257, 347, 76, -79, -384, 874, -387, -269, 892,
-783, 537, 46, 27, 251, -332, 133, 377, -522,
232, 626, -362, -499, 1112, -342, -522, 362, -187,
547, -384, -155, 517, -551, 227, 651, -825, -88,
579, -758, -40, 456, -774, 542, -164, -482, 968,
-1000, -394, 1094, -885, 431, 74, -348, 403, -959,
831, -465, -330, 762, -717, -645, 1342, -499, -416,
944, -417, -438, 737, -368, -42, 740, -1234, 689,
29, -106, 619, -824, -10, 1047, -824, 146, -59,
210, 163, -43, 522, -352, 213, 460, -1049, 599,
308, -843, 632, 223, -504, 296, 530, -931, 751,
-176, -524, 379, 236, -626, 66, 662, -575, 191,
-175, -619, 660, -424, -217, 704, -498, 200, 62,
-543, 280, 91, -378, 54, 168, -554, 670, -215,
-1097, 1805, -1015, -617, 1642, -1560, 727, 61, 7,
-48, -659, 1308, -752, -613, 914, 160, -469, 164,
-167, 274, 326, -667, 497, 333, -757, 1252, -481,
-1257, 2019, -949, -719, 1676, -1078, 250, 323, -1100,
1550, 145, -1697, 972, 522, -966, 374, -365, 846,
-276, -756, 629, -278, 302, -151, -243, -363, 841,
-7, -1092, 476, 45, 201, -378, -456, 1113, -926,
97, 178, -240, 326, -597, 472, -10, -190, 394,
-501, -259, 307, 133, 240, -433, -192, 472, -190,
12, 398, -191, -605, 1295, -576, -154, 474, -661,
866, -968, 172, 887, -736, 36, 259, -201, 265,
460, -859, 622, 102, -690, 776, -80, -745, 919,
140, -750, 224, 134, -236, -196, 456, 409, -1069,
600, 239, -306, -383, 541, -213, -323, -121, 700,
-735, 179, 222, -613, 653, -711, -81, 592, -694,
117, 703, -772, -264, 644, -117, -422, 276, 64,
-355, -430, 800, -74, -619, 1207, -1057, 4, 960,
-1219, 977, -78, -1186, 1536, 267, -1388, 1144, -90,
-1052, 1889, -1255, -387, 1815, -1763, 1037, 421, -1003,
767, -24, -277, -54, 759, -285, -1015, 1422, -581,
-121, 547, -687, 288, 440, -626, -623, 1261, -248,
-1133, 1204, -714, 382, 219, -851, 240, -161, 672,
-261, -855, 1043, -599, 111, -362, 225, 641, -913,
-122, 1075, -1165, 432, 131, -803, 978, 33, -1291,
992, 224, -1054, 789, -121, -215, 262, -11, 89,
-174, 365, -240, 114, 406, -813, 291, 233, 158,
-377, 194, 216, -477, 635, -228, -512, 599, 23,
-273, 71, 258, 10, -155, -198, 354, 61, -749,
768, -19, -709, 596, 97, -276, 164, 69, -144,
-20, 529, -897, 188, 480, -703, 836, -874, 259,
917, -1044, -7, 566, -97, -439, 256, -466, 998,
-360, -1134, 1619, -762, -752, 1446, -707, -177, 652,
-899, 579, 253, -410, 146, -262, 275, 353, -610,
52, 671, -862, 419, -140, 273, 247, -1062, 1005,
-175, -497, 772, -431, -101, 450, -598, 266, 428,
-842, 477, -11, -554, 642, 17, -787, 544, 445,
-625, -205, 796, -222, -733, 764, -572, 423, 166,
-994, 931, -228, -303, 362, -214, 104, 448, -1091,
722, 570, -1311, 773, 259, -648, 477, 193, -682,
302, 459, -464, -383, 1120, -561, -564, 1083, -372,
-354, 864, -586, -200, 502, -331, 27, 446, -657,
281, 571, -888, 502, 251, -423, 116, 277, -263,
118, -170, 168, 367, -723, 202, 438, -793, 451,
-30, -292, 202, 38, -188, -66, 221, -90, -105,
7, 346, -578, 337, 247, -371, -14, 22, 36,
151, -322, -244, 692, -556, -5, 550, -560, 200,
161, -347, 191, 258, -520, 441, -212, -215, 584,
-428, -251, 213, 90, -187, 109, 138, -211, -17,
191, 111, -259, 161, -141, 232, -175, 0, 154,
-369, 539, -171, -438, 484, 43, -375, -37, 249,
196, -328, -106, 541, -531, 103, 240, -191, 186,
-363, 40, 585, -573, 258, 170, -593, 515, -261,
-86, 407, -339, 164, -214, -34, 464, -377, -206,
336, -230, 239, -85, -69, 322, -503, 322, 142,
-748, 867, -160, -753, 836, -249, -362, 750, -374,
-222, 448, -82, -246, 399, 13, -429, 441, -47,
-127, -29, 337, -502, 318, 132, -457, 498, -145,
-91, 98, 208, -179, 54, 62, -260, 237, 96,
-161, 32, -150, 93, 21, -31, 74, 75, -322,
164, 168, -191, 119, -121, -66, -195, 296, -128,
-251, 381, -56, -338, 281, -29, -472, 664, -301,
-275, 423, -285, -77, 258, -82, -139, 160, -54,
-26, 27, 75, -49, -196, 305, -131, -187, 262,
-37, -206, 65, 269, -240, -144, 261, 54, -338,
355, 3, -503, 535, -253, -210, 433, -290, -33,
381, -546, 173, 252, -364, 271, -329, 166, 266,
-564, 507, -32, -648, 861, -400, -357, 819, -519,
-74, 392, -423, 426, -306, -93, 691, -991, 537,
467, -992, 614, 426, -823, 491, 182, -371, 174,
84, -64, 98, -96, 23, 182, -69, -211, 226,
18, -134, 334, -514, 352, 378, -623, 363, 266,
-592, 493, -46, -369, 594, -440, -10, 295, -368,
326, -192, -140, 306, -305, 140, 198, -396, 202,
154, -341, 208, -8, -169, -76, 106, 20, -347,
233, 30, -193, 117, -9, -165, 182, -4, -195,
96, 131, -188, -106, 166, -71, -99, 57, 4,
-31, -131, 101, 63, -199, 225, -25, -281, 342,
-247, -170, 516, -289, -263, 422, -158, -148, 363,
-192, -138, 122, 62, -105, 7, 194, -53, -224,
83, 173, -182, 20, 178, -274, 182, 74, -109,
-5, 319, -303, -72, 428, -371, 50, 271, -204,
17, 161, -256, 169, 93, -169, 94, -89, 139,
80, -199, 325, -67, -83, 202, -154, 16, 202,
-325, 162, 61, -93, 201, -278, 236, 108, -477,
594, -145, -370, 647, -261, -356, 669, -369, -181,
420, -266, -154, 159, -25, 53, -40, -22, 68,
-203, 144, -2, -173, 88, -3, -62, 2, 75,
55, -95, -130, 219, -142, -191, 164, -170, 44,
0, -246, 249, -27, -413, 461, 27, -490, 292,
19, -145, 13, 99, 91, -466, 209, 295, -773,
465, 210, -680, 410, 163, -358, 399, -201, 87,
23, -212, 270, -230, 86, 159, -353, 381, -73,
-456, 726, -353, -357, 754, -367, -344, 657, -59,
-417, 432, 35, -309, 153, 97, -69, 89, -101,
63, 107, -127, 106, 112, -26, -236, 376, 43,
-479, 544, -57, -407, 447, -148, -103, 195, -198,
80, 156, -228, 35, 145, -77, -55, 130, -33,
-190, 123, 41, -170, 74, 114, -241, 67, 192,
-195, -76, 186, -136, -133, 213, -105, -110, 144,
-51, -126, 154, -59, -124, 147, -49, -132, 82,
26, -130, 63, 68, -211, 97, 131, -224, 59,
184, -250, 59, 205, -225, -67, 163, -135, -24,
74, -22, -4, -81, 21, 71, -137, 71, 47,
-120, 71, 34, -65, 138, -6, -116, 112, -47,
-39, 20, -75, 64, -7, 2, 35, 52, -61,
-29, 81, -61, -30, 195, -91, -136, 261, -11,
-186, 162, -86, -35, 152, -106, -32, 126, -4,
49, 33, -9, -11, 46, 111, -132, -3, 204,
-175, -10, 281, -146, -94, 226, -126, -36, 58,
-14, 61, -172, 48, 193, -221, 83, 149, -279,
195, 130, -357, 226, 102, -260, 191, 16, -223,
124, 14, -144, 90, -31, -81, -66, 54, 103,
-181, 29, 174, -281, 92, 81, -226, 139, -133,
-41, 167, -147, 44, 27, -132, 107, -34, -122,
105, -54, 17, 52, -131, 138, 33, -206, 158,
43, -80, 24, 10, -27, 33, 43, -71, 15,
71, -42, 14, 18, 0, -3, -14, -14, 58,
46, -99, 122, 105, -202, 125, 119, -238, 112,
133, -242, 113, 129, -301, 52, 161, -177, 82,
73, -139, 46, 122, -119, 22, 155, -230, 23,
242, -211, -12, 182, -184, -57, 190, -34, -101,
58, -20, 6, 103, -61, -78, 12, 18, 12,
86, -71, -27, 43, -24, 8, 39, -109, 21,
-4, -44, 66, 13, -59, 61, -39, 35, 113,
-179, 19, 171, -158, 14, 112, -133, 26, 9,
-43, -9, 6, 41, -77, 22, 80, -61, -63,
65, -32, -32, 125, -105, -11, 114, -120, 42,
42, -92, 45, -56, -25, 131, -83, -24, 97,
-51, -5, 67, -69, 7, 41, -27, 8, 3,
-10, 8, -3, -87, -28, 122, -33, -58, 124,
-53, -50, 67, -115, -17, 111, -112, -30, 101,
-24, -13, 41, 3, 45, -13, -34, 23, 23,
-19, 13, -49, -49, 68, -68, -32, 91, -58,
-18, 73, -19, -27, 17, -33, -35, 99, -38,
-99, 78, -31, -62, 95, -71, -124, 184, -15,
-146, 160, -27, -109, 140, -25, -63, 84, -34,
-18, 58, -68, -16, 22, -87, 86, 23, -130,
61, 62, -132, 51, 168, -139, 35, 133, -121,
50, 102, -120, 40, 126, -87, -40, 119, -14,
-59, 78, 11, -68, 41, 24, -25, 55, -2,
15, 21, -73, 56, 88, -74, -41, 4, -10,
-4, 5, 7, -39, -3, -4, -39, 94, 52,
-135, 42, 90, -86, 12, 21, -55, -70, -37,
55, -63, -35, 50, -100, 21, 84, -151, 24,
87, -94, 51, 2, -58, 104, -61, -70, 60,
-25, -42, -31, 55, 35, -129, 47, 69, -65,
77, 2, -60, 110, -32, -69, 84, -54, -26,
98, -28, -7, 49, -49, -19, 119, -11, -157,
20, 106, 29, -8, -38, -30, 72, 30, -3,
1, -32, -11, -9, 52, 46, -144, -38, 86,
-31, -9, -42, -75, 142, 34, -64, 79, -109,
-55, 195, -69, -80, 48, -49, 62, 25, -111,
-42, 52, 19, -41, 1, -16, -33, 44, 30,
-21, 17, -2, -30, 111, 34, -111, 83, 55,
-119, 66, 62, -89, 63, -39, -143, 168, 21,
-158, 158, 32, -132, 134, -3, -77, 88, -45,
-18, 117, -51, -71, 10, 30, 35, -27, -63,
13, 34, 23, -23, 19, -4, -92, 34, 74,
-69, -15, 20, -36, 56, -36, -96, 69, -34,
-122, 32, 31, -51, -3, -21, 4, 43, -44,
6, 81, -39, -35, 26, -38, -24, 29, -16,
-47, -6, 19, -7, -9, 41, 32, 13, -2,
-21, 3, 24, 49, -3, -66, 14, 95, -7,
-52, 80, 68, -72, -14, 39, 2, 24, -6,
-53, 86, 21, -78, 67, 28, -34, 16, -23,
-1, 70, -3, -58, 45, 33, -94, -34, 62,
41, -11, -27, 27, 46, 14, -33, -12, 44,
-16, -59, 6, 45, -3, -42, 2, 13, 19,
-1, -71, 3, 42, -36, 6, 17, 26, 5,
-46, 6, -68, -75, 86, -20, -90, 80, 4,
-86, 5, 2, -33, -15, -2, -8, -18, 15,
-7, -25, 27, -28, -88, 39, -2, -85, 58,
40, -45, 3, 17, 0, 11, -4, -3, 84,
22, -113, 8, 94, 10, 9, 28, 6, -3,
5, -2, 23, 23, -1, -40, 20, 48, -40,
-21, 72, 7, -40, -1, 27, 16, 30, 31,
-16, 11, 9, -71, -7, 62, 21, -61, -19,
78, -2, -22, 67, -42, -12, 75, -79, 47,
86, -124, -42, 21, 4, 23, -32, -7, 19,
1, -13, -46, 2, 32, -43, -7, 86, -16,
-22, 46, -61, -35, 11, -64, -38, 17, -12,
-27, 20, 41, 6, -58, -61, 58, -51, -77,
36, -25, 19, 93, -76, 1, 72, -92, 15,
40, -56, 65, 13, -29, 82, -9, -21, 24,
-83, -5, 4, -63, 77, 80, -58, -6, -19,
-43, 100, 5, -36, 63, 33, -26, -48, 26,
-18, -75, 34, 24, -45, -1, 6, -35, -24,
-23, -22, 47, -15, -46, 31, -40, -41, 74,
-32, -73, 59, -51, -26, 143, -29, -42, 93,
-44, -21, 56, -7, 55, 51, -61, 74, 111,
-71, 35, 124, -123, -3, 62, -79, 100, 49,
-122, 143, 79, -137, 72, 30, -82, 75, -10,
-48, 35, -23, -25, 34, 0, -54, -6, 34,
-46, -59, -7, -72, -6, 70, -41, -39, 23,
-33, 11, 104, -44, -30, 54, -69, -20, 62,
-75, 1, 45, -69, 1, 40, -59, -15, 18,
-16, 38, -1, -52, 8, 14, -32, 11, -15,
-58, 18, -22, -44, 69, 40, -50, -21, 1,
-35, -3, -5, -20, 40, 36, -41, -36, -43,
-11, 48, -34, -40, 51, -10, -9, 30, 10,
12, 51, 51, -8, -16, 32, -6, 31, 24,
-38, 43, 18, -15, 53, -10, -55, 9, 8,
-28, 21, 10, -26, 21, 10, -9, 5, -29,
-13, 38, -1, -11, 49, 0, -41, 10, 23,
-25, -35, -2, -32, -10, 58, -6, -18, 16,
-9, 4, 11, 17, 21, 21, 12, -2, 49,
-16, -128, 21, 75, -32, 22, 34, -59, 48,
75, -69, -11, -2, -65, 39, 57, -54, -79,
-11, -20, -13, 38, 4, -9, -22, -22, 33,
-7, -52, 10, -10, -19, 54, 47, -21, -35,
-6, -4, 11, 8, -28, 1, 8, -4, 30,
1, -22, 26, -7, -24, 56, 25, -45, 13,
24, -32, 13, 22, -46, -2, 15, -39, 28,
32, -69, 0, 27, -69, 0, 39, -40, 28,
55, -27, -13, 0, -14, 37, 25, -25, 34,
-3, -69, 26, 39, -41, -6, 29, -7, 5,
66, 41, -27, -17, 6, -14, -21, 0, 29,
-9, -26, 32, -5, -34, 60, 15, -60, 20,
13, 11, 43, -48, -15, 88, -13, -55, 26,
-32, -46, 35, 14, -37, -11, 12, -20, 11,
9, -64, -16, 17, 5, 38, 7, -30, -9,
-49, -11, 52, -15, -38, -27, -12, 36, 53,
1, -37, -17, -12, 0, 31, 1, 13, 40,
-15, 2, 47, -15, -17, 28, -2, -4, 25,
-6, -12, 2, -17, -9, 5, -15, 17, 21,
-28, 0, 15, -43, -63, -6, -14, -8, 37,
-34, -40, 30, -12, -14, 37, -13, -16, 26,
-15, -2, 13, -37, -13, 32, 13, -8, -2,
-12, -8, 9, 9, -3, 4, 13, 34, -2,
-22, 40, 19, 29, 25, -48, -17, 23, 17,
7, 3, 0, 12, 37, -1, -25, 30, 41,
-7, 7, 29, -31, -31, -23, -27, 5, 2,
-18, -2, 22, 9, -6, 5, -7, -24, 9,
0, -28, 19, 61, -11, -45, 21, -28, -65,
28, 33, -44, -27, -6, -26, -8, 4, 5,
9, -10, -46, -20, 20, -7, -7, -33, -26,
50, 9, -65, -22, -3, -20, 15, 21, 20,
24, -16, -27, -13, 14, 21, -38, -48, 9,
35, 28, 21, 3, -31, -8, 57, 32, -35,
-22, 20, 14, 12, 28, 39, 0, -18, 44,
-2, -17, 53, 0, -27, 33, 43, 5, -10,
25, 47, -3, -4, 36, 15, -12, -3, 29,
41, 23, 23, -8, -32, 15, 37, 0, 3,
22, 31, 1, -20, 27, 2, -50, 0, 33,
16, -16, -17, 18, -26, -34, 31, -27, -84,
-33, 4, -5, -22, -17, -28, -66, -24, 8,
-16, -25, -51, -13, 45, -11, -49, -26, -49,
-38, 21, 10, -52, -58, -19, -4, 9, -31,
-29, 55, 2, -45, 29, 10, -22, 49, 33,
-27, -19, -5, 30, 47, 11, -11, -2, 8,
5, 17, 8, 3, 57, 63, 28, 24, 11,
2, 14, 22, 7, 7, 2, 23, 33, -2,
-8, 14, 7, 20, 57, 32, -5, 12, 23,
10, 17, 26, -18, -72, -6, 74, 61, 13,
-17, -21, -7, 29, 45, 5, -52, -49, 1,
10, 35, 40, -46, -66, 7, 31, -27, -44,
-12, -41, -22, 32, -12, -32, -3, -17, -22,
-22, -31, -30, -23, -13, 3, 0, -21, -19,
-7, -17, -9, 18, -40, -64, 1, 4, -4,
8, -17, -28, -1, 9, -7, -9, 27, 6,
-63, -32, 52, 25, -46, -23, -6, -11, 35,
29, -50, -44, 17, -6, -12, 53, 28, -17,
-9, 28, 34, -20, -18, 22, 43, 28, -6,
8, 14, 19, 28, 14, 27, 26, 12, 76,
66, -18, -2, 18, -12, -1, -2, -1, 51,
30, -18, 5, 14, -12, 2, 13, -25, -9,
32, 7, -5, 15, -12, -33, -18, -13, 6,
0, -25, -12, 1, -17, 0, 13, -24, -27,
4, 35, 14, -22, 5, 13, -18, -30, -10,
-7, -7, 31, 23, -27, -26, 9, 47, 6,
-50, -11, 19, 1, 11, 12, -19, -43, -18,
10, -6, -3, 12, 2, -12, -16, 10, 9,
-25, -21, -10, -13, 0, 8, -1, -9, 10,
4, -34, 14, 46, 5, 18, 24, -15, -7,
20, -1, -13, 7, 11, 14, 11, -2, 8,
27, 10, -1, 13, -2, -7, 48, 44, -15,
-16, -6, 3, 7, -35, -25, 8, -31, -16,
30, 36, 22, -13, -21, -10, 8, 2, -58,
-37, 32, 25, -1, -25, -21, 3, 3, -6,
-11, -3, 2, 4, 34, 22, -25, -19, 0,
-6, -10, -8, -35, -32, 8, -3, -20, -11,
-6, 3, 8, -8, 3, 25, 23, -7, -35,
-15, 8, -20, -6, 15, -44, -29, 19, -5,
-1, 18, 28, 6, -21, 9, 11, -20, -10,
18, 22, 6, -2, 12, 6, 23, 34, -20,
-19, 1, -10, 34, 41, 13, 6, 3, 22,
11, -4, 4, -12, -8, 17, 18, 12, -1,
5, 9, -6, -2, 4, 1, 3, 2, -6,
-32, -25, 9, 18, 27, -4, -54, -29, 2,
-3, -18, -38, -28, -10, 9, 20, 5, -9,
-15, -3, 2, -14, -15, -6, 5, 10, 6,
3, -11, -9, -5, -20, -13, 8, 3, -14,
6, 20, -15, -21, 9, 19, 21, 12, -4,
-21, -17, 16, 27, -4, -28, -2, 26, 9,
-12, -16, -28, -28, -4, 4, -15, -9, 3,
-10, -16, 2, 17, -10, -26, 3, 16, 26,
17, -12, -9, 2, -2, -5, -11, 5, 28,
1, -14, 13, 14, 5, 18, 6, -17, -5,
7, 2, -3, 11, 10, -1, 50, 36, -28,
21, 39, -9, -6, 2, 10, 36, 20, -2,
-3, -11, -10, -6, -5, -4, -8, 2, 17,
1, -13, 11, -13, -36, 11, 14, -19, -6,
3, 0, 20, -5, -24, 12, 7, -11, 2,
-15, -28, -1, 6, -14, -31, -39, -19, 19,
37, 3, -32, -27, -6, 13, 31, 15, -41,
-41, 25, 35, -3, -16, -25, -19, -10, -3,
19, 10, -4, 7, -4, -19, -12, -13, -9,
6, 2, -12, -6, 12, 6, -1, -5, -19,
-7, 7, 40, 56, -3, -13, 21, 24, 7,
-11, -9, -3, 24, 28, -10, 1, 12, 21,
24, -16, -15, 4, -7, -2, 19, 13, -11,
-7, -8, 15, 41, 5, -16, -18, -11, 26,
26, -5, -12, -14, -6, 10, 8, -8, -16,
-16, -3, 10, 1, -3, -3, -2, -15, -18,
6, -4, -4, 21, 4, -2, 15, 13, 0,
-2, 12, 7, -15, -9, 1, -2, 2, -1,
-9, -15, -17, -14, -10, 1, -4, -16, -17,
-1, 18, 8, 1, 22, 11, -19, -10, 4,
-23, -29, 0, -2, -14, -6, 13, 7, -23,
-13, 10, 9, 11, 10, 4, -4, -4, 1,
6, 14, 9, 2, 0, 2, 6, 4, -9,
-18, -8, 8, 18, 8, 13, 9, -27, -22,
-10, -24, -9, 17, 11, 2, 9, 3, -13,
-10, -1, -7, -1, 10, -4, 1, 16, 12,
-6, -14, -2, -5, -1, 0, -1, 6, -9,
-3, 12, 4, 1, -2, 2, 17, 24, 22,
9, 8, 21, 14, -2, -2, 4, -1, -7,
-7, -6, -1, -6, 17, 30, -7, -10, -3,
-19, -18, 2, 21, 4, -20, -6, -1, -18,
-14, -6, -7, -1, 6, 10, 8, -5, 0,
10, -22, -40, -22, 4, 34, 16, -19, -16,
-12, -17, -16, -17, -29, -28, -4, 10, 16,
22, 13, 4, -1, -5, 16, 15, -11, -6,
9, 3, -14, -22, -19, -12, 5, -5, -15,
3, 9, 27, 17, -4, 8, -2, 1, 16,
11, 9, 9, 8, -14, -16, 7, -5, -15,
-11, -5, 19, 25, 25, 43, 21, -9, -9,
-19, -10, 14, -11, -19, 8, 3, 1, 11,
-1, -24, -20, -1, 2, 7, 24, 22, 11,
8, 6, -2, -11, -3, -2, -4, 0, -7,
0, 6, -1, -16, -35, -8, 8, -11, -6,
6, 18, 16, 7, 12, 5, -2, -3, -10,
-21, -27, -10, -3, -3, 8, 0, -9, -10,
-3, 0, -5, 6, 9, 19, 23, 8, -5,
-19, -16, -5, -6, -27, -22, 1, 6, 8,
2, -9, -13, -15, -18, -13, 4, 25, 29,
26, -2, -22, 1, 8, 1, -6, -6, -7,
-20, 0, 13, -14, -24, -24, -21, 2, 14,
16, 23, 15, 10, 10, 5, 0, -26, -32,
3, 19, 5, -8, -7, -8, -3, 17, 27,
-7, -28, 10, 32, 10, 1, 10, 3, -4,
22, 24, -31, -40, 0, 6, 5, 17, 17,
1, 10, 30, 8, -12, -6, 9, 6, -12,
-5, 1, -4, 6, 11, 0, -9, -4, -3,
-4, -3, 2, 0, -2, -9, -27, -23, 2,
13, -6, -9, -3, -12, -2, 10, 6, -7,
-19, -31, -13, 16, 11, -3, -13, -15, 0,
7, -3, -7, -1, -4, 7, 15, 0, -12,
-8, -1, -7, -12, -21, -17, 5, 30, 25,
-6, -6, 0, -12, -8, 2, 13, 11, 1,
5, 4, 4, 10, -1, -20, -12, -4, 3,
15, 11, -7, -24, -4, 8, -2, -14, -25,
-17, 7, 21, 14, 1, 0, 12, 17, 13,
6, 1, 6, 14, 11, -10, -21, -12, -4,
3, -2, -21, -24, -2, 12, 14, 17, 4,
-2, 11, 11, 11, 1, -34, -32, -5, 10,
7, -11, -12, 6, 7, -4, -10, -15, -5,
17, 21, 0, -15, -15, -1, 5, -18, -18,
-10, -9, 24, 27, -9, -14, 0, 9, 25,
22, 1, -7, -2, 16, 13, -14, -10, 7,
0, 2, 15, 2, -9, 5, 10, -5, -3,
10, 3, 0, 15, 15, -1, -3, 8, 6,
-7, -7, 2, 0, -4, 5, -8, -37, -28,
-1, 8, 6, 10, -1, -12, 12, 28, 8,
-17, -16, -15, -17, 1, 6, -4, -8, -4,
-15, -15, 6, -9, -15, 10, 9, -13, -8,
5, -2, -10, 5, 12, -27, -33, 9, 8,
-16, -3, 16, -3, -7, 22, 22, 10, 5,
-11, -16, -4, 9, 12, 6, -3, 2, 2,
-1, 4, -7, -8, 1, 8, 19,
};
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/yes_1000ms_sample_data.cc
|
C++
|
apache-2.0
| 130,685
|
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// This data was created from the PCM data in a WAV file held in v2 of the
// Speech Commands test dataset, at the path:
// speech_commands_test_set_v0.02/yes/f2e59fea_nohash_1.wav
// This should contain all 16,000 samples from the one-second file.
#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_YES_1000MS_SAMPLE_DATA_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_YES_1000MS_SAMPLE_DATA_H_
#include <cstdint>
extern const int g_yes_1000ms_sample_data_size;
extern const int16_t g_yes_1000ms_sample_data[];
#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_YES_1000MS_SAMPLE_DATA_H_
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/yes_1000ms_sample_data.h
|
C++
|
apache-2.0
| 1,276
|
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// See the header for documentation on the meaning of this data.
#include "tensorflow/lite/micro/examples/micro_speech/yes_30ms_sample_data.h"
const int g_yes_30ms_sample_data_size = 480;
const int16_t g_yes_30ms_sample_data[480] = {
-876, -470, 510, 803, 170, -787, -1568, -1893, -1598, -1027,
-992, -1803, -2610, -2484, -1905, -2113, -3113, -3399, -2267, -1261,
-2007, -3637, -3909, -2340, -893, -1158, -2272, -2486, -1639, -915,
-777, -596, -91, 196, 85, 210, 875, 1373, 1247, 1219,
1958, 2718, 2328, 1196, 1008, 2350, 3677, 3269, 1503, 366,
922, 2264, 2810, 1996, 608, -168, 75, 680, 811, 395,
-56, -318, -607, -966, -1108, -925, -613, -368, -369, -919,
-1926, -2460, -1685, -300, 155, -611, -1524, -2204, -3227, -3859,
-2037, 1622, 2382, -2583, -8448, -7544, -84, 4814, 915, -6423,
-7558, -1746, 2515, -59, -4587, -3858, 1260, 3625, 187, -4148,
-3500, 1542, 5467, 4780, 1256, -1127, -403, 2481, 5332, 6346,
5014, 2536, 1216, 2467, 5039, 6238, 5070, 3381, 3269, 4173,
3905, 2248, 1586, 3299, 5240, 4362, 1004, -1382, -489, 2113,
3168, 1620, -742, -1824, -1435, -897, -1058, -1500, -1545, -1398,
-1965, -3266, -4136, -3756, -2609, -1804, -1986, -3087, -4599, -5296,
-4051, -1731, -781, -2228, -4092, -3977, -2325, -1353, -1568, -1490,
-428, 178, -672, -1650, -1058, 749, 2039, 2079, 1540, 897,
310, 572, 2266, 4265, 4265, 1869, -231, 559, 3332, 4752,
3229, 768, 101, 1364, 2463, 1984, 819, 411, 723, 675,
-162, -923, -743, -32, 185, -516, -1653, -2359, -2103, -986,
42, -205, -1702, -2870, -2337, -809, -221, -982, -1544, -946,
-598, -2117, -4291, -4100, -857, 1948, 338, -4799, -7972, -5403,
173, 2371, -1063, -5533, -5578, -1777, 605, -985, -3249, -2213,
1184, 2691, 560, -2356, -2288, 1233, 5244, 6441, 4004, 370,
-663, 2555, 7404, 9282, 6573, 2612, 1836, 4662, 7467, 7393,
5421, 4262, 4741, 5362, 4705, 3163, 2397, 3337, 4887, 4810,
2254, -749, -1316, 772, 2706, 2016, -573, -2552, -2746, -2012,
-1647, -1978, -2579, -3105, -3473, -3911, -4484, -4891, -4795, -4163,
-3543, -3538, -4275, -5356, -5743, -4637, -2614, -1301, -1825, -3341,
-4011, -2937, -751, 1007, 1245, 235, -639, -61, 1626, 2864,
2967, 2734, 3013, 3329, 2914, 2312, 2666, 3839, 4308, 3162,
1453, 768, 1255, 1887, 2006, 1715, 1031, -297, -1660, -1690,
-277, 813, -30, -2137, -3370, -2854, -1553, -593, -413, -1146,
-2567, -3440, -2369, -205, 379, -1258, -2315, -812, 262, -3205,
-8576, -7894, 738, 7492, 1951, -11595, -17098, -6934, 7139, 8065,
-4575, -14199, -8946, 3606, 7504, -547, -8242, -5113, 4406, 8113,
2134, -5040, -4089, 4157, 10934, 10158, 4167, -565, -192, 4428,
9765, 12201, 9861, 4512, 1225, 3451, 8483, 10133, 6497, 2574,
3333, 6806, 6986, 2487, -1214, 623, 5416, 6647, 2204, -3289,
-4556, -1565, 1544, 1525, -1236, -4293, -5695, -5174, -3995, -3403,
-3449, -3750, -4505, -6014, -7296, -6523, -3849, -2096, -3288, -5722,
-6004, -3581, -1497, -1960, -3330, -2800, -434, 964, -111, -1739,
-1136, 1736, 4151, 3736, 1274, -451, 469, 3386, 5833, 5898,
3646, 1085, 272, 1743, 4061, 5108, 3837, 1490, 246, 967,
1866, 859, -1069, -974, 1542, 2835, 47, -4285, -5068, -1567,
1781, 1223, -1997, -4227, -3747, -1720, 41, 245, -1228, -2972,
-2673, 22, 1980, -930, -7721, -11271, -5725, 4974, 8484, -2007,
-16979, -19255, -4670, 11057, 9690, -6417, -17537, -10841, 4262, 9292,
};
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/yes_30ms_sample_data.cc
|
C++
|
apache-2.0
| 4,670
|
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// This data was created from the PCM data in a WAV file held in v2 of the
// Speech Commands test dataset, at the path:
// speech_commands_test_set_v0.02/yes/f2e59fea_nohash_1.wav
// The data was extracted starting at an offset of 8,000, which corresponds to
// the 26th spectrogram slice. It's designed to be used to test the
// preprocessing pipeline, to ensure that the expected spectrogram slice is
// produced given this input.
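// (At the 16 kHz sample rate a 20 ms slice stride is 320 samples, so
// 8,000 / 320 = 25 complete slices precede this offset, making it slice 26.)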
#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_YES_30MS_SAMPLE_DATA_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_YES_30MS_SAMPLE_DATA_H_
#include <cstdint>
extern const int g_yes_30ms_sample_data_size;
extern const int16_t g_yes_30ms_sample_data[];
#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_YES_30MS_SAMPLE_DATA_H_
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/micro_speech/yes_30ms_sample_data.h
|
C++
|
apache-2.0
| 1,451
|
/*
* Copyright (C) 2020-2023 Alibaba Group Holding Limited
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <ulog/ulog.h>
#include "oled/oled.h"
void oled_init(void)
{
sh1106_init();
}
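/* Clear the panel, draw the text for the given UI state, and flush the
 * graphics RAM to the display. */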
void oled_show(oled_str_e str)
{
OLED_Clear();
if (str == OLED_STR_WAKEUP) {
OLED_Show_String(14, 24, "Hi, I am here!", 16, 1);
} else if (str == OLED_STR_IDLE) {
OLED_Show_String(6, 12, "I am your HaaS", 16, 1);
OLED_Show_String(4, 30, "voice assistant!", 16, 1);
} else if (str == OLED_STR_LIGHT_ON) {
OLED_Show_String(14, 24, "Turn on ...", 16, 1);
} else if (str == OLED_STR_LIGHT_OFF) {
OLED_Show_String(14, 24, "Turn off ...", 16, 1);
}
OLED_Refresh_GRAM();
}
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/oled/oled.c
|
C
|
apache-2.0
| 750
|
#ifndef _OLED_H_
#define _OLED_H_
#ifdef __cplusplus
extern "C" {
#endif
typedef enum {
OLED_STR_IDLE,
OLED_STR_WAKEUP,
OLED_STR_LIGHT_ON,
OLED_STR_LIGHT_OFF,
OLED_STR_MAX
} oled_str_e;
void oled_init(void);
void oled_show(oled_str_e str);
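/*
 * Minimal usage sketch:
 *   oled_init();
 *   oled_show(OLED_STR_IDLE);
 */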
#ifdef __cplusplus
} // extern "C"
#endif
#endif
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/oled/oled.h
|
C
|
apache-2.0
| 294
|
/*
* Copyright (C) 2020-2023 Alibaba Group Holding Limited
*/
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <ulog/ulog.h>
#include "uvoice_init.h"
#include "uvoice_types.h"
#include "uvoice_event.h"
#include "uvoice_player.h"
#include "uvoice_recorder.h"
#include "ulog/ulog.h"
#include "aos/kernel.h"
#include "uvoice_os.h"
#include "player.h"
#define TAG "player"
static aos_task_t play_task;
static uvoice_player_t *uvocplayer;
static char text_source[256];
static player_cb_t play_done_cb = NULL;
static player_mp3_e g_file = PLAYER_MP3_MAX;
extern int audio_install_codec_driver();
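/* Map a file-name suffix (e.g. ".mp3") to the corresponding uvoice media
 * format; the result is currently informational, since set_source() takes
 * the raw path. */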
static int get_format_from_name(char *name, media_format_t *format)
{
if (!name || !format) {
LOGE(TAG, "arg null !\n");
return -1;
}
if (strstr(name, ".mp3") || strstr(name, ".MP3"))
*format = MEDIA_FMT_MP3;
else if (strstr(name, ".wav") || strstr(name, ".WAV"))
*format = MEDIA_FMT_WAV;
else if (strstr(name, ".aac") || strstr(name, ".AAC"))
*format = MEDIA_FMT_AAC;
else if (strstr(name, ".m4a") || strstr(name, ".M4A"))
*format = MEDIA_FMT_M4A;
else if (strstr(name, ".pcm") || strstr(name, ".PCM"))
*format = MEDIA_FMT_PCM;
else if (strstr(name, ".spx") || strstr(name, ".SPX"))
*format = MEDIA_FMT_SPX;
else if (strstr(name, ".ogg") || strstr(name, ".OGG"))
*format = MEDIA_FMT_OGG;
else if (strstr(name, ".amrwb") || strstr(name, ".AMRWB"))
*format = MEDIA_FMT_AMRWB;
else if (strstr(name, ".amr") || strstr(name, ".AMR"))
*format = MEDIA_FMT_AMR;
else if (strstr(name, ".opus") || strstr(name, ".OPUS"))
*format = MEDIA_FMT_OPS;
else if (strstr(name, ".flac") || strstr(name, ".FLAC"))
*format = MEDIA_FMT_FLAC;
return 0;
}
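/* Resolve the media format of text_source, hand the path to the uvoice
 * player, start playback, and invoke the completion callback if one was
 * registered. */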
static void *play_music(void *arg)
{
media_format_t format = MEDIA_FMT_UNKNOWN;
get_format_from_name(text_source, &format);
if (uvocplayer->set_source(text_source)) {
LOGE(TAG, "set source failed !\n");
return NULL;
}
if (uvocplayer->start()) {
LOGE(TAG, "start failed !\n");
uvocplayer->clr_source();
}
// uvocplayer->wait_complete();
if (play_done_cb)
play_done_cb(g_file);
return NULL;
}
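/* Pick the MP3 file that matches the requested prompt (choosing a wakeup
 * reply at random) and play it synchronously. */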
int32_t player_play(player_mp3_e file)
{
int32_t random;
random = rand() % 240;
LOG("random: %d\n", random);
memset(text_source, 0, sizeof(text_source));
if (file == PLAYER_MP3_WELCOME) {
strcpy(text_source, "fs:/data/mp3/welcome.mp3");
} else if (file == PLAYER_MP3_WAKEUP) {
// if (random < 100) {
// strcpy(text_source, "fs:/data/mp3/haas_intro.mp3");
// } else
        if (random < 150) {
            strcpy(text_source, "fs:/data/mp3/zhurenyoushenmkeyibangnin.mp3");
        } else if (random < 200) {
            strcpy(text_source, "fs:/data/mp3/zhurenwozai.mp3");
        } else {
            strcpy(text_source, "fs:/data/mp3/eiwozai.mp3");
        }
g_file = PLAYER_MP3_WAKEUP;
} else if (file == PLAYER_MP3_LIGHT_ON) {
strcpy(text_source, "fs:/data/mp3/haodeyiweinindakai.mp3");
g_file = PLAYER_MP3_LIGHT_ON;
} else if (file == PLAYER_MP3_LIGHT_OFF) {
strcpy(text_source, "fs:/data/mp3/haodeyiweininguanbi.mp3");
g_file = PLAYER_MP3_LIGHT_OFF;
}
#if 1
    /* set a very long standby timeout so the player does not enter the idle state prematurely */
uvocplayer->set_standby(2147483646);
play_music(NULL);
uvocplayer->wait_complete();
aos_msleep(500);
#else
aos_task_new_ext(&play_task,
"play music task", play_music,
NULL, 8192, 0);
#endif
return 0;
}
int32_t player_stop(void)
{
    /* stop and clear the current playback */
    uvocplayer->set_fade(40, 40);
    uvocplayer->stop_async();
    uvocplayer->clr_source();
    return 0;
}
int32_t player_wait_complete(void)
{
    int32_t ret;
    ret = uvocplayer->wait_complete();
    if (ret < 0)
        aos_msleep(1000);
    aos_msleep(500);
    return ret;
}
int32_t player_init(player_cb_t cb)
{
int32_t ret;
/*Init uvoice to play mp3*/
ret = uvoice_init();
if (ret < 0) {
LOGE(TAG, "uvoice_init failed !\n");
return -1;
}
/*create uvoice player*/
uvocplayer = uvoice_player_create();
if (!uvocplayer) {
LOGE(TAG, "create media player failed !\n");
return -1;
}
/*set eq*/
uvocplayer->eq_enable(0);
/*set play volume*/
uvocplayer->set_volume(10);
play_done_cb = cb;
return 0;
}
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/player/player.c
|
C
|
apache-2.0
| 4,560
|
#ifndef _PLAYER_H_
#define _PLAYER_H_
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef enum {
PLAYER_MP3_WELCOME,
PLAYER_MP3_WAKEUP,
PLAYER_MP3_LIGHT_ON,
PLAYER_MP3_LIGHT_OFF,
PLAYER_MP3_MAX
} player_mp3_e;
typedef void (*player_cb_t)(player_mp3_e file);
int32_t player_init(player_cb_t cb);
int32_t player_play(player_mp3_e file);
int32_t player_wait_complete(void);
int32_t player_stop(void);
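/*
 * Minimal usage sketch (on_play_done is a hypothetical example callback):
 *
 *   static void on_play_done(player_mp3_e file) { }
 *
 *   player_init(on_play_done);
 *   player_play(PLAYER_MP3_WELCOME);
 *   player_wait_complete();
 */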
#ifdef __cplusplus
} // extern "C"
#endif
#endif
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/player/player.h
|
C
|
apache-2.0
| 492
|
/*
* Copyright (C) 2020-2023 Alibaba Group Holding Limited
*/
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <ulog/ulog.h>
#include "uvoice_init.h"
#include "uvoice_types.h"
#include "uvoice_event.h"
#include "uvoice_recorder.h"
#include "ulog/ulog.h"
#include "aos/kernel.h"
#include "uvoice_os.h"
#define TAG "recorder"
static uvoice_recorder_t *mrecorder = NULL;
int32_t recorder_init(void)
{
mrecorder = uvoice_recorder_create();
if (!mrecorder) {
LOGE(TAG, "create uvoice recorder failed !\n");
return -1;
}
return 0;
}
int32_t recorder_uninit(void)
{
return mrecorder->stop();
}
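/* Configure the capture sink (format, sample rate, channels, bit width,
 * period size, optional file sink) and start recording. */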
int32_t recorder_start(media_format_t fmt, int32_t rate, int32_t channels, int32_t bits, int32_t samples, char *sink)
{
    int32_t ret = 0;
ret = mrecorder->set_sink(fmt, rate, channels, bits, samples, 0, sink);
if (ret < 0) {
LOGE(TAG, "uvoice recorder set_sink failed !\n");
return -1;
}
ret = mrecorder->start();
if (ret < 0) {
LOGE(TAG, "uvoice recorder start failed !\n");
return -1;
}
return 0;
}
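/* Stop capturing by clearing the configured sink. */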
int32_t recorder_stop(void)
{
return mrecorder->clr_sink();
}
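/* Read up to read_size bytes of captured audio into buf. */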
int32_t recorder_get_stream(uint8_t *buf, int32_t read_size)
{
return mrecorder->get_stream(buf, read_size);
}
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/recorder/recorder.c
|
C
|
apache-2.0
| 1,307
|
#ifndef _RECORDER_H_
#define _RECORDER_H_
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
#include "uvoice_types.h"
int32_t recorder_init(void);
int32_t recorder_uninit(void);
int32_t recorder_start(media_format_t fmt, int32_t rate, int32_t channels, int32_t bits, int32_t samples, char *sink);
int32_t recorder_stop(void);
int32_t recorder_get_stream(uint8_t *buf, int32_t read_size);
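/*
 * Minimal usage sketch (the 16 kHz / mono / 16-bit / 480-sample PCM
 * parameters and the NULL sink are illustrative assumptions, not
 * requirements of this API):
 *
 *   uint8_t buf[960];
 *   recorder_init();
 *   recorder_start(MEDIA_FMT_PCM, 16000, 1, 16, 480, NULL);
 *   recorder_get_stream(buf, sizeof(buf));
 *   recorder_stop();
 *   recorder_uninit();
 */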
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _RECORDER_H_
|
YifuLiu/AliOS-Things
|
solutions/tflite_micro_speech_demo/recorder/recorder.h
|
C
|
apache-2.0
| 467
|
CPRE := @
ifeq ($(V),1)
CPRE :=
VERB := --verbose
endif
MK_GENERATED_IMGS_PATH:=generated
PRODUCT_BIN:=product
.PHONY:startup
startup: all
all:
@echo "Build Solution by $(BOARD) "
$(CPRE) scons $(VERB) --board=$(BOARD) $(MULTITHREADS) $(MAKEFLAGS)
@echo ucloud ai demo build Done
@echo [INFO] Create bin files
# $(CPRE) $(PRODUCT_BIN) image $(MK_GENERATED_IMGS_PATH)/images.zip -i $(MK_GENERATED_IMGS_PATH)/data -l -p
# $(CPRE) $(PRODUCT_BIN) image $(MK_GENERATED_IMGS_PATH)/images.zip -e $(MK_GENERATED_IMGS_PATH) -x
.PHONY:flash
flash:
$(CPRE) $(PRODUCT_BIN) flash $(MK_GENERATED_IMGS_PATH)/images.zip -w prim
.PHONY:flashall
flashall:
$(CPRE) $(PRODUCT_BIN) flash $(MK_GENERATED_IMGS_PATH)/images.zip -a
sdk:
$(CPRE) haas sdk
.PHONY:clean
clean:
$(CPRE) scons -c --board=$(BOARD)
$(CPRE) find . -name "*.[od]" -delete
$(CPRE) rm yoc_sdk yoc.* generated out -rf
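# Usage sketch; the BOARD value below is an example, not fixed by this Makefile:
#   make BOARD=haas100          # build the solution for the chosen board
#   make flash BOARD=haas100    # flash the primary image
#   make clean BOARD=haas100    # remove build artifacts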
|
YifuLiu/AliOS-Things
|
solutions/ucloud_ai_demo/Makefile
|
Makefile
|
apache-2.0
| 882
|
#! /bin/env python
from aostools import Make
# defconfig = Make(elf='yoc.elf', objcopy='generated/data/prim', objdump='yoc.asm')
defconfig = Make(elf='aos.elf', objcopy='aos.bin')
Export('defconfig')
defconfig.build_components()
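# Usage sketch: this SConstruct is not run directly; the wrapper Makefile in
# this solution drives it via "scons --board=<BOARD>" (board name supplied by
# the caller).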
|
YifuLiu/AliOS-Things
|
solutions/ucloud_ai_demo/SConstruct
|
Python
|
apache-2.0
| 233
|
#!/usr/bin/env python3
import os
import sys
import getpass
import shutil
comp_path = sys.path[0]
print("comp_path:")
print(comp_path)
# original folder
org_image_path = comp_path + "/resources/image"
org_font_path = comp_path + "/resources/font"
# new folder
data_path = comp_path + "/../../hardware/chip/haas1000/prebuild/data/data"
image_path = data_path + "/ai_demo_image"
font_path = data_path + "/font"
# delete prebuild/data resources
if os.path.exists(image_path):
    print('Delete /data/ai_demo_image firstly')
    shutil.rmtree(image_path)
if os.path.exists(font_path):
    print('Delete /data/font firstly')
    shutil.rmtree(font_path)
# copy resources
shutil.copytree(org_image_path, image_path)
shutil.copytree(org_font_path, font_path)
# result
print("run external script success")
|
YifuLiu/AliOS-Things
|
solutions/ucloud_ai_demo/cp_resources.py
|
Python
|
apache-2.0
| 882
|
/*
* This example demonstrates configuring MQTT parameters with the SDK and establishing a connection, then creating two threads
*
* + one thread keeps the long-lived connection alive
* + one thread receives messages; it enters the default data callback when a message arrives, and the event callback when the connection state changes
*
* It then demonstrates property reporting and event reporting over the MQTT connection, as well as handling received property sets and service invocations; uncomment those code sections to observe the behavior
*
* The parts that users need to review or modify are marked with TODO in the comments
*
*/
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <aos/kernel.h>
#include <aiconfig.h>
#include "aiot_state_api.h"
#include "aiot_sysdep_api.h"
#include "aiot_mqtt_api.h"
#include "aiot_dm_api.h"
/* Collection of system adaptation functions located under the portfiles/aiot_port folder */
extern aiot_sysdep_portfile_t g_aiot_sysdep_portfile;
/* Server certificate located in external/ali_ca_cert.c */
extern const char *ali_ca_cert;
static uint8_t g_mqtt_process_thread_running = 0;
static uint8_t g_mqtt_recv_thread_running = 0;
static void *g_dm_handle = NULL;
/* Developers must replace these with their own device credentials: product key, product secret, device name, and device secret */
#define PRODUCT_KEY "<Your-Product-Key>"
#define PRODUCT_SECRET "<Your-Product-Secret>"
#define DEVICE_NAME "<Your-Device-Name>"
#define DEVICE_SECRET "<Your-Device-Secret>"
/* TODO: To disable logging, implement this function as empty; to reduce logging, filter on the code value and skip printing
*
* For example: [1577589489.033][LK-0317] mqtt_basic_linkkit&a13FN5TplKq
*
* The code of the log line above is 0317 (hexadecimal); code values are defined in core/aiot_state_api.h
*
*/
/* Log callback; the SDK's logs are emitted through this function */
int32_t linkkit_state_logcb(int32_t code, char *message)
{
printf("%s", message);
return 0;
}
/* MQTT event callback, triggered on network connect/reconnect/disconnect; events are defined in core/aiot_mqtt_api.h */
void linkkit_mqtt_event_handler(void *handle, const aiot_mqtt_event_t *event, void *userdata)
{
switch (event->type) {
/* The SDK has successfully connected to the MQTT server because the user called aiot_mqtt_connect() */
case AIOT_MQTTEVT_CONNECT: {
printf("AIOT_MQTTEVT_CONNECT\n");
}
break;
/* After a passive disconnect caused by network conditions, the SDK's automatic reconnect has succeeded */
case AIOT_MQTTEVT_RECONNECT: {
printf("AIOT_MQTTEVT_RECONNECT\n");
}
break;
/* The SDK was passively disconnected by network conditions: "network" means a low-level read/write failure, "heartbeat" means the expected heartbeat response from the server was not received */
case AIOT_MQTTEVT_DISCONNECT: {
char *cause = (event->data.disconnect == AIOT_MQTTDISCONNEVT_NETWORK_DISCONNECT) ? ("network disconnect") :
("heartbeat disconnect");
printf("AIOT_MQTTEVT_DISCONNECT: %s\n", cause);
}
break;
default: {
}
}
}
/* Thread that runs aiot_mqtt_process, covering heartbeat keepalive and QoS1 message retransmission */
void *linkkit_mqtt_process_thread(void *args)
{
int32_t res = STATE_SUCCESS;
while (g_mqtt_process_thread_running) {
res = aiot_mqtt_process(args);
if (res == STATE_USER_INPUT_EXEC_DISABLED) {
break;
}
aos_msleep(1000);
}
return NULL;
}
/* Thread that runs aiot_mqtt_recv, covering automatic network reconnection and receiving MQTT messages from the server */
void *linkkit_mqtt_recv_thread(void *args)
{
int32_t res = STATE_SUCCESS;
while (g_mqtt_recv_thread_running) {
res = aiot_mqtt_recv(args);
if (res < STATE_SUCCESS) {
if (res == STATE_USER_INPUT_EXEC_DISABLED) {
break;
}
aos_msleep(1000);
}
}
return NULL;
}
/* Callback for handling received user data */
static void linkkit_dm_recv_handler(void *dm_handle, const aiot_dm_recv_t *recv, void *userdata)
{
printf("linkkit_dm_recv_handler, type = %d\r\n", recv->type);
switch (recv->type) {
/* Reply to property reporting, event reporting, or a get/delete of desired property values */
case AIOT_DMRECV_GENERIC_REPLY: {
printf("msg_id = %d, code = %d, data = %.*s, message = %.*s\r\n",
recv->data.generic_reply.msg_id,
recv->data.generic_reply.code,
recv->data.generic_reply.data_len,
recv->data.generic_reply.data,
recv->data.generic_reply.message_len,
recv->data.generic_reply.message);
}
break;
/* Property set */
case AIOT_DMRECV_PROPERTY_SET: {
printf("msg_id = %ld, params = %.*s\r\n",
(unsigned long)recv->data.property_set.msg_id,
recv->data.property_set.params_len,
recv->data.property_set.params);
/* TODO: The code below demonstrates how to reply to a property-set command from the cloud platform; users can uncomment it to see the effect */
/*
{
aiot_dm_msg_t msg;
memset(&msg, 0, sizeof(aiot_dm_msg_t));
msg.type = AIOT_DMMSG_PROPERTY_SET_REPLY;
msg.data.property_set_reply.msg_id = recv->data.property_set.msg_id;
msg.data.property_set_reply.code = 200;
msg.data.property_set_reply.data = "{}";
int32_t res = aiot_dm_send(dm_handle, &msg);
if (res < 0) {
printf("aiot_dm_send failed\r\n");
}
}
*/
}
break;
/* Asynchronous service invocation */
case AIOT_DMRECV_ASYNC_SERVICE_INVOKE: {
printf("msg_id = %ld, service_id = %s, params = %.*s\r\n",
(unsigned long)recv->data.async_service_invoke.msg_id,
recv->data.async_service_invoke.service_id,
recv->data.async_service_invoke.params_len,
recv->data.async_service_invoke.params);
/* TODO: The code below demonstrates how to reply to an asynchronous service invocation from the cloud platform; users can uncomment it to see the effect
*
* Note: to reply outside this callback, the user must save msg_id, because the callback arguments are destroyed by the SDK once the callback returns and can no longer be accessed
*/
/*
{
aiot_dm_msg_t msg;
memset(&msg, 0, sizeof(aiot_dm_msg_t));
msg.type = AIOT_DMMSG_ASYNC_SERVICE_REPLY;
msg.data.async_service_reply.msg_id = recv->data.async_service_invoke.msg_id;
msg.data.async_service_reply.code = 200;
msg.data.async_service_reply.service_id = "ToggleLightSwitch";
msg.data.async_service_reply.data = "{\"dataA\": 20}";
int32_t res = aiot_dm_send(dm_handle, &msg);
if (res < 0) {
printf("aiot_dm_send failed\r\n");
}
}
*/
}
break;
/* Synchronous service invocation */
case AIOT_DMRECV_SYNC_SERVICE_INVOKE: {
printf("msg_id = %ld, rrpc_id = %s, service_id = %s, params = %.*s\r\n",
(unsigned long)recv->data.sync_service_invoke.msg_id,
recv->data.sync_service_invoke.rrpc_id,
recv->data.sync_service_invoke.service_id,
recv->data.sync_service_invoke.params_len,
recv->data.sync_service_invoke.params);
/* TODO: The code below demonstrates how to reply to a synchronous service invocation from the cloud platform; users can uncomment it to see the effect
*
* Note: to reply outside this callback, the user must save msg_id and the rrpc_id string, because the callback arguments are destroyed by the SDK once the callback returns and can no longer be accessed
*/
/*
{
aiot_dm_msg_t msg;
memset(&msg, 0, sizeof(aiot_dm_msg_t));
msg.type = AIOT_DMMSG_SYNC_SERVICE_REPLY;
msg.data.sync_service_reply.rrpc_id = recv->data.sync_service_invoke.rrpc_id;
msg.data.sync_service_reply.msg_id = recv->data.sync_service_invoke.msg_id;
msg.data.sync_service_reply.code = 200;
msg.data.sync_service_reply.service_id = "SetLightSwitchTimer";
msg.data.sync_service_reply.data = "{}";
int32_t res = aiot_dm_send(dm_handle, &msg);
if (res < 0) {
printf("aiot_dm_send failed\r\n");
}
}
*/
}
break;
/* Downlink binary data */
case AIOT_DMRECV_RAW_DATA: {
printf("raw data len = %d\r\n", recv->data.raw_data.data_len);
/* TODO: The code below demonstrates how to send data in binary format; using it requires a matching data passthrough script deployed in the cloud */
/*
{
aiot_dm_msg_t msg;
uint8_t raw_data[] = {0x01, 0x02};
memset(&msg, 0, sizeof(aiot_dm_msg_t));
msg.type = AIOT_DMMSG_RAW_DATA;
msg.data.raw_data.data = raw_data;
msg.data.raw_data.data_len = sizeof(raw_data);
aiot_dm_send(dm_handle, &msg);
}
*/
}
break;
/* Synchronous service invocation in binary format; compared with a plain binary data message it additionally carries an rrpc_id */
case AIOT_DMRECV_RAW_SYNC_SERVICE_INVOKE: {
printf("raw sync service rrpc_id = %s, data_len = %d\r\n",
recv->data.raw_service_invoke.rrpc_id,
recv->data.raw_service_invoke.data_len);
}
break;
default:
break;
}
}
/* Demonstration of property reporting */
int32_t linkkit_send_property_post(void *dm_handle, char *params)
{
aiot_dm_msg_t msg;
memset(&msg, 0, sizeof(aiot_dm_msg_t));
msg.type = AIOT_DMMSG_PROPERTY_POST;
msg.data.property_post.params = params;
return aiot_dm_send(dm_handle, &msg);
}
/* Demonstration of event reporting */
int32_t linkkit_send_event_post(void *dm_handle, char *event_id, char *params)
{
aiot_dm_msg_t msg;
memset(&msg, 0, sizeof(aiot_dm_msg_t));
msg.type = AIOT_DMMSG_EVENT_POST;
msg.data.event_post.event_id = event_id;
msg.data.event_post.params = params;
return aiot_dm_send(dm_handle, &msg);
}
/* Demonstrates getting the desired value of the LightSwitch property; users can add this function to main to run the demo */
int32_t linkkit_send_get_desired_request(void *dm_handle)
{
aiot_dm_msg_t msg;
memset(&msg, 0, sizeof(aiot_dm_msg_t));
msg.type = AIOT_DMMSG_GET_DESIRED;
msg.data.get_desired.params = "[\"LightSwitch\"]";
return aiot_dm_send(dm_handle, &msg);
}
/* Demonstrates deleting the desired value of the LightSwitch property; users can add this function to main to run the demo */
int32_t linkkit_send_delete_desired_request(void *dm_handle)
{
aiot_dm_msg_t msg;
memset(&msg, 0, sizeof(aiot_dm_msg_t));
msg.type = AIOT_DMMSG_DELETE_DESIRED;
msg.data.delete_desired.params = "{\"LightSwitch\":{}}";
return aiot_dm_send(dm_handle, &msg);
}
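/*
 * A minimal usage sketch for the two desired-value helpers above, assuming
 * the MQTT connection set up in linkkit_main() has already succeeded so that
 * g_dm_handle is valid:
 *
 * linkkit_send_get_desired_request(g_dm_handle);
 * linkkit_send_delete_desired_request(g_dm_handle);
 */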
void user_post_event(char type_t)
{
int32_t res = 0;
char *event_id = "boss_face_detection";
char *event_payload;
if (type_t == 0)
event_payload = "{\"face_detected\": 0}";
else
event_payload = "{\"face_detected\": 1}";
res = linkkit_send_event_post(g_dm_handle, event_id, event_payload);
if (res < 0)
printf("linkkit_send_event_post fail, res = %d\n", res);
}
int linkkit_main(void *paras)
{
int32_t res = STATE_SUCCESS;
void *mqtt_handle = NULL;
char *url = "iot-as-mqtt.cn-shanghai.aliyuncs.com"; /* 阿里云平台上海站点的域名后缀 */
char host[100] = {0}; /* 用这个数组拼接设备连接的云平台站点全地址, 规则是 ${productKey}.iot-as-mqtt.cn-shanghai.aliyuncs.com */
uint16_t port = 443; /* 无论设备是否使用TLS连接阿里云平台, 目的端口都是443 */
aiot_sysdep_network_cred_t cred; /* 安全凭据结构体, 如果要用TLS, 这个结构体中配置CA证书等参数 */
/* TODO: 替换为自己设备的三元组 */
char *product_key = PRODUCT_KEY;
char *device_name = DEVICE_NAME;
char *device_secret = DEVICE_SECRET;
/* Configure the SDK's low-level dependencies */
aiot_sysdep_set_portfile(&g_aiot_sysdep_portfile);
/* Configure the SDK's log output */
aiot_state_set_logcb(linkkit_state_logcb);
/* Create the SDK's security credential, used to establish the TLS connection */
memset(&cred, 0, sizeof(aiot_sysdep_network_cred_t));
cred.option = AIOT_SYSDEP_NETWORK_CRED_SVRCERT_CA; /* Verify the MQTT server with an RSA certificate */
cred.max_tls_fragment = 16384; /* Maximum TLS fragment length is 16K; other options are 4K, 2K, 1K, 0.5K */
cred.sni_enabled = 1; /* Support Server Name Indication during TLS connection setup */
cred.x509_server_cert = ali_ca_cert; /* RSA root certificate used to verify the MQTT server */
cred.x509_server_cert_len = strlen(ali_ca_cert); /* Length of the RSA root certificate used to verify the MQTT server */
/* Create one MQTT client instance and internally initialize default parameters */
mqtt_handle = aiot_mqtt_init();
if (mqtt_handle == NULL) {
printf("aiot_mqtt_init failed\n");
return -1;
}
snprintf(host, 100, "%s.%s", product_key, url);
/* Configure the MQTT server address */
aiot_mqtt_setopt(mqtt_handle, AIOT_MQTTOPT_HOST, (void *)host);
/* Configure the MQTT server port */
aiot_mqtt_setopt(mqtt_handle, AIOT_MQTTOPT_PORT, (void *)&port);
/* Configure the device productKey */
aiot_mqtt_setopt(mqtt_handle, AIOT_MQTTOPT_PRODUCT_KEY, (void *)product_key);
/* Configure the device deviceName */
aiot_mqtt_setopt(mqtt_handle, AIOT_MQTTOPT_DEVICE_NAME, (void *)device_name);
/* Configure the device deviceSecret */
aiot_mqtt_setopt(mqtt_handle, AIOT_MQTTOPT_DEVICE_SECRET, (void *)device_secret);
/* Configure the network security credential created above */
aiot_mqtt_setopt(mqtt_handle, AIOT_MQTTOPT_NETWORK_CRED, (void *)&cred);
/* Configure the MQTT event callback */
aiot_mqtt_setopt(mqtt_handle, AIOT_MQTTOPT_EVENT_HANDLER, (void *)linkkit_mqtt_event_handler);
/* Create the DATA-MODEL instance */
g_dm_handle = aiot_dm_init();
if (g_dm_handle == NULL) {
printf("aiot_dm_init failed");
return -1;
}
/* Configure the MQTT instance handle */
aiot_dm_setopt(g_dm_handle, AIOT_DMOPT_MQTT_HANDLE, mqtt_handle);
/* Configure the message receive callback */
aiot_dm_setopt(g_dm_handle, AIOT_DMOPT_RECV_HANDLER, (void *)linkkit_dm_recv_handler);
/* Establish the MQTT connection with the server */
res = aiot_mqtt_connect(mqtt_handle);
if (res < STATE_SUCCESS) {
/* The connection attempt failed; destroy the MQTT instance and reclaim resources */
aiot_mqtt_deinit(&mqtt_handle);
printf("aiot_mqtt_connect failed: -0x%04X\n", -res);
return -1;
}
/* Create a dedicated thread to run aiot_mqtt_process; it automatically sends keepalive heartbeats and retransmits unacknowledged QoS1 packets */
g_mqtt_process_thread_running = 1;
res = aos_task_new("linkkit_mqtt_process", linkkit_mqtt_process_thread, mqtt_handle, 4096);
// res = pthread_create(&g_mqtt_process_thread, NULL, linkkit_mqtt_process_thread, mqtt_handle);
if (res != 0) {
printf("create linkkit_mqtt_process_thread failed: %d\n", res);
return -1;
}
/* Create a dedicated thread to run aiot_mqtt_recv; it continuously receives MQTT messages pushed by the server and automatically reconnects after a disconnect */
g_mqtt_recv_thread_running = 1;
res = aos_task_new("linkkit_mqtt_recv", linkkit_mqtt_recv_thread, mqtt_handle, 4096);
// res = pthread_create(&g_mqtt_recv_thread, NULL, linkkit_mqtt_recv_thread, mqtt_handle);
if (res != 0) {
printf("create linkkit_mqtt_recv_thread failed: %d\n", res);
return -1;
}
/* The main loop just sleeps */
while (1) {
/* TODO: The code below demonstrates simple property and event reporting; users can uncomment it to observe the effect */
// linkkit_send_property_post(g_dm_handle, "{\"LightSwitch\": 0}");
// linkkit_send_event_post(g_dm_handle, "Error", "{\"ErrorCode\": 0}");
aos_msleep(10000);
}
/* Disconnect the MQTT connection; normally never reached */
res = aiot_mqtt_disconnect(mqtt_handle);
if (res < STATE_SUCCESS) {
aiot_mqtt_deinit(&mqtt_handle);
printf("aiot_mqtt_disconnect failed: -0x%04X\n", -res);
return -1;
}
/* Destroy the DATA-MODEL instance; normally never reached */
res = aiot_dm_deinit(&g_dm_handle);
if (res < STATE_SUCCESS) {
printf("aiot_dm_deinit failed: -0x%04X\n", -res);
return -1;
}
/* Destroy the MQTT instance; normally never reached */
res = aiot_mqtt_deinit(&mqtt_handle);
if (res < STATE_SUCCESS) {
printf("aiot_mqtt_deinit failed: -0x%04X\n", -res);
return -1;
}
g_mqtt_process_thread_running = 0;
g_mqtt_recv_thread_running = 0;
return 0;
}
|
YifuLiu/AliOS-Things
|
solutions/ucloud_ai_demo/linkkit_event.c
|
C
|
apache-2.0
| 17,197
|
/*
* Copyright (c) 2014-2016 Alibaba Group. All rights reserved.
* License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include "aos/kernel.h"
#include "netmgr.h"
#include "ulog/ulog.h"
#include "aiconfig.h"
#include <uservice/uservice.h>
#include <uservice/eventid.h>
#include "aiagent_common.h"
#include "ugraphics.h"
#ifdef AOS_USE_BEEPER
#include "beeper.h"
#endif
static char linkkit_started = 0;
extern void ucloud_ai_demo_main(void *p);
extern int linkkit_main(void *paras);
extern void set_iotx_info(void);
extern void do_comparing_facebody_async(void);
extern void do_recognize_expression_async(void);
static void wifi_service_event(uint32_t event_id, const void *param, void *context)
{
int ai_model = AI_MODEL;
if (event_id != EVENT_NETMGR_SNTP_SUCCESS) {
return;
}
if (!linkkit_started) {
printf("start to do ucloud_ai_demo\n");
aos_task_new("ucloud_ai_demo_main", ucloud_ai_demo_main, NULL, 1024 * 20);
if (ai_model == AI_MODEL_COMPARING_FACEBODY)
aos_task_new("linkkit", (void (*)(void *))linkkit_main, NULL, 1024 * 10);
linkkit_started = 1;
}
}
int application_start(int argc, char **argv)
{
aos_set_log_level(AOS_LL_DEBUG);
/*Init ugraphics component*/
ugraphics_init(SCREEN_W, SCREEN_H);
/*Set screen default color*/
ugraphics_set_color(COLOR_RED);
/*Load default font*/
ugraphics_load_font("/data/font/Alibaba-PuHuiTi-Heavy.ttf", 18);
/*Set default font style*/
ugraphics_set_font_style(UGRAPHICS_FONT_STYLE);
#ifdef AOS_OLED_SH1106
sh1106_init();
#endif
/*init event service*/
event_service_init(NULL);
/*init network service*/
netmgr_service_init(NULL);
/*enable network auto reconnect*/
netmgr_set_auto_reconnect(NULL, true);
/*enable auto save wifi config*/
netmgr_wifi_set_auto_save_ap(true);
/*Subscribe wifi service event*/
event_subscribe(EVENT_NETMGR_SNTP_SUCCESS, wifi_service_event, NULL);
while (1) {
aos_msleep(1000);
};
return 0;
}
|
YifuLiu/AliOS-Things
|
solutions/ucloud_ai_demo/main.c
|
C
|
apache-2.0
| 2,682
|
/*
* Copyright (C) 2015-2020 Alibaba Group Holding Limited
*/
#include <stdio.h>
#include <stdlib.h>
#include <aos/kernel.h>
#include "aos/init.h"
#include "board.h"
#include <k_api.h>
#ifndef AOS_BINS
extern int application_start(int argc, char *argv[]);
#endif
/*
If the board lacks a component such as board_xx_init, this app does not support that board.
Set the correspondence in file platform\board\aaboard_demo\ucube.py.
*/
extern void board_tick_init(void);
extern void board_stduart_init(void);
extern void board_dma_init(void);
extern void board_gpio_init(void);
extern void board_network_init(void);
extern void board_kinit_init(kinit_t *init_args);
extern void board_flash_init(void);
/* For user config
kinit.argc = 0;
kinit.argv = NULL;
kinit.cli_enable = 1;
*/
static kinit_t kinit = {0, NULL, 1};
/**
* @brief Board Initialization Function
* @param None
* @retval None
*/
void board_init(void)
{
board_tick_init();
board_stduart_init();
board_dma_init();
board_gpio_init();
board_flash_init();
/*FOR STM32F429 delete hal_i2c_pre_init \I2C1_init\CAN_init here*/
}
void aos_maintask(void *arg)
{
board_init();
board_kinit_init(&kinit);
aos_components_init(&kinit);
#ifndef AOS_BINS
application_start(kinit.argc, kinit.argv); /* jump to app entry */
#endif
}
|
YifuLiu/AliOS-Things
|
solutions/ucloud_ai_demo/maintask.c
|
C
|
apache-2.0
| 1,362
|
/*
* Copyright (C) 2015-2018 Alibaba Group Holding Limited
*/
#include <stdio.h>
#include <string.h>
#include "ulog/ulog.h"
#include "aos/kernel.h"
#include "ugraphics.h"
#include "aiconfig.h"
#include "aiagent_service.h"
#include "ucamera_service.h"
#ifdef AOS_OLED_SH1106
#include "sh1106.h"
#endif
#ifdef AOS_USE_BEEPER
#include "beeper.h"
#endif
#define TAG "UCLOUD_AI"
#define CAPTURED_IMAGE "/data/capture.jpg"
// #define USE_CURL
#define LOG printf
#include <httpclient.h>
#ifdef CONFIG_ALICLOUD_FACEBODY_ENABLE
static float g_confidence = 0.0;
static char *p_expression = NULL;
#endif
extern void user_post_event(char type_t);
#ifdef CONFIG_ALICLOUD_FACEBODY_ENABLE
static int facebody_compare_callback(ai_result_t *result)
{
int ret;
float confidence;
int x, y, w, h;
if (!result)
return -1;
confidence = result->facebody.face.confidence;
x = result->facebody.face.location.x;
y = result->facebody.face.location.y;
w = result->facebody.face.location.w;
h = result->facebody.face.location.h;
/*judge if boss is coming*/
if (confidence > 60) {
LOG("==============================================\n");
LOG("=================boss is coming===============\n");
LOG("==============================================\n");
/*post event to Iot platform*/
// user_post_event(1);
LOG("draw string\n");
ret = ugraphics_draw_string("Warning!!!", 120, 100);
ret = ugraphics_draw_string("boss is coming!!!", 100, 120);
/*show string on OLED for HaaS EDK*/
#ifdef AOS_OLED_SH1106
OLED_Clear();
OLED_Show_String(24, 12, "Warning!!!", 12, 1);
OLED_Show_String(12, 36, "Boss is coming!", 12, 1);
OLED_Refresh_GRAM();
#endif
/*beeper alarm for HaaS EDK*/
#ifdef AOS_USE_BEEPER
beeper_start();
#endif
} else {
#ifdef AOS_OLED_SH1106
OLED_Clear();
OLED_Show_String(12, 24, "It's safe now!", 12, 1);
OLED_Refresh_GRAM();
#endif
/*stop beeper alarm*/
#ifdef AOS_USE_BEEPER
beeper_stop();
#endif
}
return 0;
}
static int recognize_expression_callback(ai_result_t *result)
{
int len;
char *expression = NULL;
float face_probability;
if (!result)
return -1;
expression = result->facebody.expression.expression;
face_probability = result->facebody.expression.probability;
if (!expression)
return -1;
ugraphics_draw_image("/data/ai_demo_image/sadness.jpg", 20, 20);
ugraphics_draw_image("/data/ai_demo_image/happiness.jpg", 80, 20);
ugraphics_draw_image("/data/ai_demo_image/surprise.jpg", 140, 20);
LOG("expression: %s\n", expression);
/*draw image to lcd screen*/
if (!strcmp(expression, "sadness")) {
ugraphics_draw_image("/data/ai_demo_image/right.jpg", 20, 70);
} else if (!strcmp(expression, "happiness")) {
ugraphics_draw_image("/data/ai_demo_image/right.jpg", 80, 70);
} else if (!strcmp(expression, "surprise")) {
ugraphics_draw_image("/data/ai_demo_image/right.jpg", 140, 70);
} else {
/*do nothing*/
}
return 0;
}
static int generate_human_anime_style_callback(ai_result_t *result)
{
int ret;
int image_len;
char *image = NULL;
if (!result)
return -1;
/*save human anime style image to file system*/
image_len = result->facebody.anime.imageLen;
image = result->facebody.anime.image;
if (!image) {
LOGE(TAG, "image is null\n");
return -1;
}
ret = ugraphics_save_image(image, image_len, "/data/humananime.png");
if (ret < 0) {
LOGE(TAG, "ugraphics_save_image fail\n");
return -1;
}
/*draw image to lcd screen*/
ugraphics_draw_image("/data/humananime.png", 0, 0);
return ret;
}
#endif
#ifdef CONFIG_ALICLOUD_OBJECTDET_ENABLE
static int detect_object_callback(ai_result_t *result)
{
int len = 0;
char *p_type = NULL;
int x, y, w, h;
char *type = NULL;
float score;
if (!result)
return -1;
LOG("detect_object_callback\n");
type = result->objectdet.object.type;
score = result->objectdet.object.score;
x = result->objectdet.object.box.x;
y = result->objectdet.object.box.y;
w = result->objectdet.object.box.w;
h = result->objectdet.object.box.h;
if (!type) {
LOGE(TAG, "type is null\n");
return -1;
}
/*draw image to lcd screen*/
LOG("type: %s\n", type);
ugraphics_draw_string(type, x, y);
ugraphics_draw_rect(x, y, w, h);
ugraphics_draw_rect(x + 1, y + 1, w - 2, h - 2);
ugraphics_draw_rect(x + 2, y + 2, w - 4, h - 4);
return 0;
}
static int detect_main_body_callback(ai_result_t *result)
{
int x, y, w, h;
if (!result)
return -1;
x = result->objectdet.mainbody.location.x;
y = result->objectdet.mainbody.location.y;
w = result->objectdet.mainbody.location.w;
h = result->objectdet.mainbody.location.h;
/*draw rect to lcd screen*/
ugraphics_draw_rect(x, y, w, h);
/*fill boarder*/
ugraphics_draw_rect(x + 1, y + 1, w - 2, h - 2);
ugraphics_draw_rect(x + 2, y + 2, w - 4, h - 4);
return 0;
}
#endif
#ifdef CONFIG_ALICLOUD_IMAGESEG_ENABLE
static int segment_common_image_callback(ai_result_t *result)
{
int ret;
int image_len = 0;
char *image = NULL;
if (!result)
return -1;
/*save image to file system*/
image = result->imageseg.common.image;
image_len = result->imageseg.common.imageLen;
if (!image) {
LOGE(TAG, "image is null\n");
return -1;
}
ret = ugraphics_save_image(image, image_len, "/data/segment_common_image.png");
if (ret < 0) {
LOGE(TAG, "ugraphics_save_image fail\n");
return -1;
}
/*draw image to lcd screen*/
ugraphics_draw_image("/data/segment_common_image.png", 0, 0);
return 0;
}
static int segment_face_callback(ai_result_t *result)
{
int ret;
int image_len = 0;
int x, y;
int index;
char *image = NULL;
char path[32] = {0};
if (!result)
return -1;
/*clear capture background*/
ugraphics_clear();
/*save image*/
image = result->imageseg.face.image;
image_len = result->imageseg.face.imageLen;
ret = ugraphics_save_image(image, image_len, "/data/segment_face.png");
if (ret < 0) {
LOGE(TAG, "save_captured_image fail\n");
return -1;
}
/*draw image to lcd screen*/
x = result->imageseg.face.location.x;
y = result->imageseg.face.location.y;
ugraphics_draw_image("/data/segment_face.png", x, y);
return 0;
}
#endif
#ifdef CONFIG_ALICLOUD_OCR_ENABLE
static int recognize_identity_card_face_side_callback(ai_result_t *result)
{
char *address = NULL;
char *birthdate = NULL;
char *gender = NULL;
char *nationality = NULL;
char *id_num = NULL;
float card_x[4], card_y[4], face_x[4], face_y[4];
if (!result)
return -1;
/*draw address string*/
address = result->ocr.identity.face.address;
if (address && strlen(address) > 0) {
ugraphics_draw_string(address, 20, 20);
}
/*draw birthdate string*/
birthdate = result->ocr.identity.face.birthDate;
if (birthdate && strlen(birthdate) > 0) {
ugraphics_draw_string(birthdate, 20, 40);
}
/*draw gender string*/
gender = result->ocr.identity.face.gender;
if (gender && strlen(gender) > 0) {
ugraphics_draw_string(gender, 20, 60);
}
/*draw nationality string*/
nationality = result->ocr.identity.face.nationality;
if (nationality && strlen(nationality) > 0) {
ugraphics_draw_string(nationality, 20, 80);
}
/*draw id number string*/
id_num = result->ocr.identity.face.iDNumber;
if (id_num && strlen(id_num) > 0) {
ugraphics_draw_string(id_num, 20, 100);
}
/*reset card and face position*/
memset(card_x, 0, 4 * sizeof(float));
memset(card_y, 0, 4 * sizeof(float));
memset(face_x, 0, 4 * sizeof(float));
memset(face_y, 0, 4 * sizeof(float));
/*draw card box line*/
memcpy(card_x, result->ocr.identity.face.cardX, 4 * sizeof(float));
memcpy(card_y, result->ocr.identity.face.cardY, 4 * sizeof(float));
if (card_x && card_y) {
ugraphics_draw_line(card_x[2], card_y[2], card_x[3], card_y[3]);
ugraphics_draw_line(card_x[2], card_y[2] + 1, card_x[3], card_y[3] + 1);
ugraphics_draw_line(card_x[2], card_y[2] + 2, card_x[3], card_y[3] + 2);
ugraphics_draw_line(card_x[3], card_y[3], card_x[0], card_y[0]);
ugraphics_draw_line(card_x[3] - 1, card_y[3], card_x[0] - 1, card_y[0]);
ugraphics_draw_line(card_x[3] - 2, card_y[3], card_x[0] - 2, card_y[0]);
ugraphics_draw_line(card_x[1], card_y[1], card_x[0], card_y[0]);
ugraphics_draw_line(card_x[1], card_y[1] - 1, card_x[0], card_y[0] - 1);
ugraphics_draw_line(card_x[1], card_y[1] - 2, card_x[0], card_y[0] - 2);
ugraphics_draw_line(card_x[2], card_y[2], card_x[1], card_y[1]);
ugraphics_draw_line(card_x[2] + 1, card_y[2], card_x[1] + 1, card_y[1]);
ugraphics_draw_line(card_x[2] + 2, card_y[2], card_x[1] + 2, card_y[1]);
}
/*draw face box line*/
memcpy(face_x, result->ocr.identity.face.faceX, 4 * sizeof(float));
memcpy(face_y, result->ocr.identity.face.faceY, 4 * sizeof(float));
if (face_x && face_y) {
/*draw top line*/
ugraphics_draw_line(face_x[0], face_y[0], face_x[1], face_y[1]);
/*draw left line*/
ugraphics_draw_line(face_x[0], face_y[0], face_x[3], face_y[3]);
/*draw right line*/
ugraphics_draw_line(face_x[1], face_y[1], face_x[2], face_y[2]);
/*draw bottom line*/
ugraphics_draw_line(face_x[3], face_y[3], face_x[2], face_y[2]);
}
return 0;
}
static int recognize_identity_card_back_side_callback(ai_result_t *result)
{
char *start_date = NULL;
char *issue = NULL;
char *end_date = NULL;
if (!result)
return -1;
/*draw start date of identity card's back side*/
start_date = result->ocr.identity.back.startDate;
if (start_date && strlen(start_date) > 0) {
ugraphics_draw_string(start_date, 20, 20);
}
/*draw issue of identity card's back side*/
issue = result->ocr.identity.back.issue;
if (issue && strlen(issue) > 0) {
ugraphics_draw_string(issue, 20, 40);
}
/*draw end date of identity card's back side*/
end_date = result->ocr.identity.back.endDate;
if (end_date && strlen(end_date) > 0) {
ugraphics_draw_string(end_date, 20, 60);
}
return 0;
}
static int recognize_bank_card_callback(ai_result_t *result)
{
char *bank_name = NULL;
char *card_number = NULL;
char *valid_date = NULL;
if (!result)
return -1;
/*draw bank name of bank card*/
bank_name = result->ocr.bank.bankName;
if (bank_name && strlen(bank_name) > 0) {
ugraphics_draw_string(bank_name, 20, 0);
}
/*draw card number of bank card*/
card_number = result->ocr.bank.cardNumber;
if (card_number && strlen(card_number) > 0) {
ugraphics_draw_string(card_number, 20, 20);
}
/*draw valid date of bank card*/
valid_date = result->ocr.bank.validDate;
if (valid_date && strlen(valid_date) > 0) {
ugraphics_draw_string(valid_date, 20, 40);
}
return 0;
}
static int recognize_character_callback(ai_result_t *result)
{
char probability_str[8];
float probability;
char *text = NULL;
int left, top;
if (!result)
return -1;
/*draw character text*/
text = result->ocr.character.text;
left = result->ocr.character.left;
top = result->ocr.character.top;
probability = result->ocr.character.probability;
if (text) {
sprintf(probability_str, "%.2f", probability);
ugraphics_draw_string(text, left, top);
ugraphics_draw_string(probability_str, left, top + 20);
}
return 0;
}
#endif
#ifdef CONFIG_ALICLOUD_IMAGERECOG_ENABLE
static int imagerecog_classifying_rubbish_callback(ai_result_t *result)
{
char rubbish_score_str[16], category_score_str[16];
char *rubbish = NULL;
char *category = NULL;
float rubbish_score;
float category_score;
if (!result)
return -1;
/*draw rubbish name*/
rubbish = result->imagerecog.rubbish.rubbish;
rubbish_score = result->imagerecog.rubbish.rubbishScore;
if (rubbish && strlen(rubbish) > 0) {
sprintf(rubbish_score_str, "%.2f", rubbish_score);
ugraphics_draw_string(rubbish, 20, 20);
// ugraphics_draw_string(rubbish_score_str, 20, 40);
}
/*draw rubbish category*/
category = result->imagerecog.rubbish.category;
category_score = result->imagerecog.rubbish.categoryScore;
if (category && strlen(category) > 0) {
sprintf(category_score_str, "%.2f", category_score);
ugraphics_draw_string(category, 20, 60);
// ugraphics_draw_string(category_score_str, 20, 80);
}
return 0;
}
static int imagerecog_detect_fruits_callback(ai_result_t *result)
{
char score_str[8];
char *name = NULL;
int tmp_y, x, y;
float score;
if (!result)
return -1;
/*draw fruits name and score*/
name = result->imagerecog.fruits.name;
score = result->imagerecog.fruits.score;
x = result->imagerecog.fruits.box.x;
y = result->imagerecog.fruits.box.y;
if (name && strlen(name) > 0) {
sprintf(score_str, "%.2f", score);
if (y < 60)
tmp_y = y;
else
tmp_y = y - 60;
ugraphics_draw_string(name, x, tmp_y);
ugraphics_draw_string(score_str, x, tmp_y + 40);
}
return 0;
}
#endif
#ifdef CONFIG_ALICLOUD_IMAGEENHAN_ENABLE
static int imageenhan_erase_person_callback(ai_result_t *result)
{
int ret;
int image_len = 0;
char *image = NULL;
if (!result)
return -1;
/*save image to file system*/
image = result->imageenhan.person.image;
image_len = result->imageenhan.person.imageLen;
if (!image) {
LOGE(TAG, "image is null\n");
return -1;
}
ret = ugraphics_save_image(image, image_len, "/data/erase_person.png");
if (ret < 0) {
LOGE(TAG, "ugraphics_save_image fail\n");
return -1;
}
/*draw image to lcd screen*/
ugraphics_draw_image("/data/erase_person.png", 0, 0);
return 0;
}
static int imageenhan_extend_image_style_callback(ai_result_t *result)
{
int ret;
int major_image_len = 0;
int out_image_len = 0;
char *major_image = NULL;
char *out_image = NULL;
if (!result)
return -1;
/*save image*/
major_image = result->imageenhan.style.majorImage;
major_image_len = result->imageenhan.style.majorImageLen;
out_image = result->imageenhan.style.outImage;
out_image_len = result->imageenhan.style.outImageLen;
if (major_image) {
/*save image*/
ret = ugraphics_save_image(major_image, major_image_len, "/data/major_image.png");
if (ret < 0) {
LOGE(TAG, "ugraphics_save_image fail\n");
return -1;
}
/*draw image to lcd screen*/
ugraphics_draw_image("/data/major_image.png", 0, 0);
} else if (out_image) {
/*save image*/
ret = ugraphics_save_image(out_image, out_image_len, "/data/final_image.png");
if (ret < 0) {
LOGE(TAG, "ugraphics_save_image fail\n");
return -1;
}
/*draw image to lcd screen*/
ugraphics_draw_image("/data/final_image.png", 0, 0);
}
return 0;
}
#endif // CONFIG_ALICLOUD_IMAGEENHAN_ENABLE
/*
*** ucloud ai demo main thread
*/
int ucloud_ai_demo_main(void *p)
{
int ret = 0;
char *upload_url = NULL;
char *image1 = CAPTURED_IMAGE;
char *image2 = NULL;
ai_engine_cb_t cb;
LOG("start ucloud_ai_demo_main\n");
/*init ucamera service*/
ret = ucamera_service_init(WIFI_CAMERA_NAME);
if (ret < 0) {
LOGE(TAG, "ucamera_service_init failed");
return -1;
}
/*config ucamera*/
ret = ucamera_service_config(UCAMERA_CMD_SET_CONTROL_URL, (void *)WIFICAMERA_FRAME_SIZE_CONTROL_URL);
if (ret < 0) {
LOGE(TAG, "ucamera_service_config frame size failed");
return -1;
}
/*start ucamera*/
ret = ucamera_service_connect(WIFICAMERA_URL);
if (ret < 0) {
LOGE(TAG, "ucamera service start fail\n");
return -1;
}
/*init ai agent service*/
ret = aiagent_service_init("ucloud-ai", AI_MODEL);
if (ret < 0) {
LOGE(TAG, "aiagent_service_init failed");
return -1;
}
while (1) {
frame_buffer_t *frame = ucamera_service_get_frame();
if (!frame) {
LOGE(TAG, "frame is null\n");
continue;
}
/*save file to data folder*/
ret = ugraphics_save_image(frame->buf, frame->len, image1);
if (ret < 0) {
LOGE(TAG, "ugraphics_save_image fail\n");
continue;
}
/*draw camera frame*/
ugraphics_draw_image(image1, 0, 0);
/*get callback function based on current model*/
switch (aiagent_service_get_cur_model()) {
#ifdef CONFIG_ALICLOUD_FACEBODY_ENABLE
case AI_MODEL_COMPARING_FACEBODY:
cb = facebody_compare_callback;
image2 = MYFACE_PATH;
break;
case AI_MODEL_GENERATE_HUMAN_ANIME_STYLE:
cb = generate_human_anime_style_callback;
break;
case AI_MODEL_RECOGNIZE_EXPRESSION:
cb = recognize_expression_callback;
break;
#endif
#ifdef CONFIG_ALICLOUD_OBJECTDET_ENABLE
case AI_MODEL_DETECT_OBJECT:
cb = detect_object_callback;
LOG("cb = detect_object_callback\n");
break;
case AI_MODEL_DETECT_MAIN_BODY:
cb = detect_main_body_callback;
break;
#endif
#ifdef CONFIG_ALICLOUD_IMAGESEG_ENABLE
case AI_MODEL_SEGMENT_COMMON_IMAGE:
cb = segment_common_image_callback;
break;
case AI_MODEL_SEGMENT_FACE:
cb = segment_face_callback;
break;
#endif
#ifdef CONFIG_ALICLOUD_OCR_ENABLE
case AI_MODEL_RECOGNIZE_IDENTITY_CARD_FACE_SIDE:
cb = recognize_identity_card_face_side_callback;
break;
case AI_MODEL_RECOGNIZE_IDENTITY_CARD_BACK_SIDE:
cb = recognize_identity_card_back_side_callback;
break;
case AI_MODEL_RECOGNIZE_BANK_CARD:
cb = recognize_bank_card_callback;
break;
case AI_MODEL_RECOGNIZE_CHARACTER:
cb = recognize_character_callback;
break;
#endif
#ifdef CONFIG_ALICLOUD_IMAGERECOG_ENABLE
case AI_MODEL_CLASSIFYING_RUBBISH:
cb = imagerecog_classifying_rubbish_callback;
break;
case AI_MODEL_DETECT_FRUITS:
cb = imagerecog_detect_fruits_callback;
break;
#endif
#ifdef CONFIG_ALICLOUD_IMAGEENHAN_ENABLE
case AI_MODEL_ERASE_PERSON:
cb = imageenhan_erase_person_callback;
break;
case AI_MODEL_EXTEND_IMAGE_STYLE:
cb = imageenhan_extend_image_style_callback;
break;
#endif
default:
cb = NULL;
break;
}
/*do ai model inference*/
if (cb)
aiagent_service_model_infer(image1, image2, (ai_engine_cb_t)cb);
/*flip image to lcd screen*/
ugraphics_flip();
}
ucamera_service_disconnect();
ucamera_service_uninit();
aiagent_service_uninit();
LOG("ucloud_ai_demo_main end\n");
return 0;
}
|
YifuLiu/AliOS-Things
|
solutions/ucloud_ai_demo/ucloud_ai_demo.c
|
C
|
apache-2.0
| 19,962
|
# coding=utf-8
# This is a sample Python script.
import utime
from video import camera
print("start video preview test")
# Open the camera (renamed to avoid shadowing the imported camera class)
cam = camera()
cam.open(0)
# Start video capture and preview, lasting 10000 seconds
cam.preview(10000)
# Close the camera
cam.close()
print("end video preview test")
|
YifuLiu/AliOS-Things
|
solutions/videopreview_demo/src/main.py
|
Python
|
apache-2.0
| 318
|
# coding=utf-8
# This is a sample Python script.
import usys
import utime
import network
from video import Recorder
# Check arguments
if len(usys.argv) < 3:
    print("Usage: %s <ssid> <password>" % usys.argv[0])
    usys.exit(1)
# Define the network event callback
def network_evt_cb(eid):
    print('%s:%d' % ('eid', eid))
# Connect to the network
network.init(network.WIFI)
network.connect(usys.argv[1], usys.argv[2], network_evt_cb)
network.close()
# Wait for the network connection
utime.sleep_ms(10000)
print("start recorder with rtsp test")
# Start video streaming
recorder = Recorder()
recorder.open(0, recorder.H264)
recorder.start()
utime.sleep_ms(100000000)
# Stop video streaming
recorder.stop()
recorder.close()
print("end recorder test")
|
YifuLiu/AliOS-Things
|
solutions/videortsp_demo/src/main.py
|
Python
|
apache-2.0
| 732
|
# Copyright (c) 2020 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ubuntu:18.04
WORKDIR /home/openharmony
RUN sed -i "s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list \
&& sed -i "s@http://.*security.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list \
&& apt-get update -y \
&& apt-get install -y apt-utils binutils bison flex bc build-essential make mtd-utils gcc-arm-linux-gnueabi u-boot-tools python3.8 python3-pip git zip unzip curl wget gcc g++ ruby=1:2.5.1 dosfstools mtools default-jre default-jdk scons python3.8-distutils perl openssl libssl-dev cpio git-lfs m4 ccache zlib1g-dev tar rsync liblz4-tool genext2fs binutils-dev device-tree-compiler e2fsprogs git-core gnupg gnutls-bin gperf lib32ncurses5-dev libffi-dev zlib* libelf-dev libx11-dev libgl1-mesa-dev lib32z1-dev xsltproc x11proto-core-dev libc6-dev-i386 libxml2-dev lib32z-dev libdwarf-dev \
&& apt-get install -y grsync xxd libglib2.0-dev libpixman-1-dev kmod jfsutils reiserfsprogs xfsprogs squashfs-tools pcmciautils quota ppp libtinfo-dev libtinfo5 libncurses5 libncurses5-dev libncursesw5 libstdc++6 python2.7 gcc-arm-none-eabi \
&& apt-get install -y vim ssh locales \
&& apt-get install -y doxygen \
&& locale-gen "en_US.UTF-8" \
&& rm -rf /bin/sh /usr/bin/python /usr/bin/python3 /usr/bin/python3m \
&& ln -s /bin/bash /bin/sh \
&& ln -s /usr/bin/python3.8 /usr/bin/python3 \
&& ln -s /usr/bin/python3.8 /usr/bin/python3m \
&& ln -s /usr/bin/python3.8 /usr/bin/python \
&& curl https://gitee.com/oschina/repo/raw/fork_flow/repo-py3 > /usr/bin/repo \
&& chmod +x /usr/bin/repo \
&& pip3 install --trusted-host https://repo.huaweicloud.com -i https://repo.huaweicloud.com/repository/pypi/simple requests setuptools pymongo kconfiglib pycryptodome ecdsa ohos-build pyyaml prompt_toolkit==1.0.14 redis json2html yagmail python-jenkins \
&& pip3 install esdk-obs-python --trusted-host pypi.org \
&& pip3 install six --upgrade --ignore-installed six \
&& mkdir -p /home/tools \
&& mkdir -p /home/tools/gn \
&& wget -P /home/tools https://repo.huaweicloud.com/openharmony/compiler/clang/12.0.1-530132/linux/clang-530132-linux-x86_64.tar.bz2 \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/hc-gen/0.65/linux/hc-gen-0.65-linux.tar \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/gcc_riscv32/7.3.0/linux/gcc_riscv32-linux-7.3.0.tar.gz \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/ninja/1.9.0/linux/ninja.1.9.0.tar \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/gn/1717/linux/gn-linux-x86-1717.tar.gz \
&& wget -P /home/tools https://mirrors.huaweicloud.com/nodejs/v14.15.4/node-v14.15.4-linux-x64.tar.xz \
&& wget -P /home/tools https://hm-verify.obs.cn-north-4.myhuaweicloud.com/qemu-5.2.0.tar.xz \
&& tar -jxvf /home/tools/clang-530132-linux-x86_64.tar.bz2 -C /home/tools \
&& mv /home/tools/clang-530132 /home/tools/llvm \
&& tar -xvf /home/tools/hc-gen-0.65-linux.tar -C /home/tools \
&& tar -xvf /home/tools/gcc_riscv32-linux-7.3.0.tar.gz -C /home/tools \
&& tar -xvf /home/tools/ninja.1.9.0.tar -C /home/tools \
&& tar -xvf /home/tools/gn-linux-x86-1717.tar.gz -C /home/tools/gn \
&& tar -xJf /home/tools/node-v14.15.4-linux-x64.tar.xz -C /home/tools \
&& cp /home/tools/node-v14.15.4-linux-x64/bin/node /usr/local/bin \
&& ln -s /home/tools/node-v14.15.4-linux-x64/lib/node_modules/npm/bin/npm-cli.js /usr/local/bin/npm \
&& ln -s /home/tools/node-v14.15.4-linux-x64/lib/node_modules/npm/bin/npx-cli.js /usr/local/bin/npx \
&& tar -xJf /home/tools/qemu-5.2.0.tar.xz -C /home/tools \
&& sed -i '$aexport PATH=/home/tools/llvm/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/hc-gen:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/gcc_riscv32/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/ninja:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/node-v14.15.4-linux-x64/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/gn:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/root/.local/bin:$PATH' /root/.bashrc \
&& export PATH=/home/tools/llvm/bin:$PATH \
&& export PATH=/home/tools/hc-gen:$PATH \
&& export PATH=/home/tools/gcc_riscv32/bin:$PATH \
&& export PATH=/home/tools/ninja:$PATH \
&& export PATH=/home/tools/node-v14.15.4-linux-x64/bin:$PATH \
&& export PATH=/home/tools/gn:$PATH \
&& export PATH=/root/.local/bin:$PATH \
&& cd /home/tools/qemu-5.2.0 \
&& mkdir build \
&& cd build \
&& ../configure --target-list=arm-softmmu \
&& make -j \
&& make install \
&& cd /home/openharmony \
&& rm -rf /home/tools/*.tar \
&& rm -rf /home/tools/*.gz \
&& rm -rf /home/tools/*.xz \
&& rm -rf /home/tools/qemu-5.2.0 \
&& npm install -g @ohos/hpm-cli --registry https://mirrors.huaweicloud.com/repository/npm/
ENV LANG=en_US.UTF-8 LANGUAGE=en_US.UTF-8 LC_ALL=en_US.UTF-8
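# Usage sketch; the image tag and source mount are examples, not mandated by
# this Dockerfile:
#   docker build -t openharmony-build .
#   docker run -it -v "$PWD":/home/openharmony openharmony-build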
|
yp9522/docs_wyzAOP
|
docker/Dockerfile
|
Dockerfile
|
public-domain
| 5,461
|
# Copyright (c) 2020 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ubuntu:18.04
WORKDIR /home/openharmony
RUN sed -i "s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list \
&& sed -i "s@http://.*security.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list \
&& apt-get update -y \
&& apt-get install -y apt-utils binutils bison flex bc build-essential make mtd-utils gcc-arm-linux-gnueabi u-boot-tools python3.8 python3-pip git zip unzip curl wget gcc g++ ruby=1:2.5.1 dosfstools mtools default-jre default-jdk scons python3.8-distutils perl openssl libssl-dev cpio git-lfs m4 ccache zlib1g-dev tar rsync liblz4-tool genext2fs binutils-dev device-tree-compiler e2fsprogs git-core gnupg gnutls-bin gperf lib32ncurses5-dev libffi-dev zlib* libelf-dev libx11-dev libgl1-mesa-dev lib32z1-dev xsltproc x11proto-core-dev libc6-dev-i386 libxml2-dev lib32z-dev libdwarf-dev \
&& apt-get install -y grsync xxd libglib2.0-dev libpixman-1-dev kmod jfsutils reiserfsprogs xfsprogs squashfs-tools pcmciautils quota ppp libtinfo-dev libtinfo5 libncurses5 libncurses5-dev libncursesw5 libstdc++6 python2.7 gcc-arm-none-eabi \
&& apt-get install -y vim ssh locales \
&& apt-get install -y doxygen \
&& locale-gen "en_US.UTF-8" \
&& rm -rf /bin/sh /usr/bin/python /usr/bin/python3 /usr/bin/python3m \
&& ln -s /bin/bash /bin/sh \
&& ln -s /usr/bin/python3.8 /usr/bin/python3 \
&& ln -s /usr/bin/python3.8 /usr/bin/python3m \
&& ln -s /usr/bin/python3.8 /usr/bin/python \
&& curl https://gitee.com/oschina/repo/raw/fork_flow/repo-py3 > /usr/bin/repo \
&& chmod +x /usr/bin/repo \
&& pip3 install --trusted-host https://repo.huaweicloud.com -i https://repo.huaweicloud.com/repository/pypi/simple requests setuptools pymongo kconfiglib pycryptodome ecdsa ohos-build pyyaml prompt_toolkit==1.0.14 redis json2html yagmail python-jenkins \
&& pip3 install esdk-obs-python --trusted-host pypi.org \
&& pip3 install six --upgrade --ignore-installed six \
&& mkdir -p /home/tools \
&& mkdir -p /home/tools/gn \
&& wget -P /home/tools https://repo.huaweicloud.com/openharmony/compiler/clang/12.0.1-530132/linux/clang-530132-linux-x86_64.tar.bz2 \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/hc-gen/0.65/linux/hc-gen-0.65-linux.tar \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/gcc_riscv32/7.3.0/linux/gcc_riscv32-linux-7.3.0.tar.gz \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/ninja/1.9.0/linux/ninja.1.9.0.tar \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/gn/1717/linux/gn-linux-x86-1717.tar.gz \
&& wget -P /home/tools https://mirrors.huaweicloud.com/nodejs/v14.15.4/node-v14.15.4-linux-x64.tar.xz \
&& wget -P /home/tools https://hm-verify.obs.cn-north-4.myhuaweicloud.com/qemu-5.2.0.tar.xz \
&& tar -jxvf /home/tools/clang-530132-linux-x86_64.tar.bz2 -C /home/tools \
&& mv /home/tools/clang-530132 /home/tools/llvm \
&& tar -xvf /home/tools/hc-gen-0.65-linux.tar -C /home/tools \
&& tar -xvf /home/tools/gcc_riscv32-linux-7.3.0.tar.gz -C /home/tools \
&& tar -xvf /home/tools/ninja.1.9.0.tar -C /home/tools \
&& tar -xvf /home/tools/gn-linux-x86-1717.tar.gz -C /home/tools/gn \
&& tar -xJf /home/tools/node-v14.15.4-linux-x64.tar.xz -C /home/tools \
&& cp /home/tools/node-v14.15.4-linux-x64/bin/node /usr/local/bin \
&& ln -s /home/tools/node-v14.15.4-linux-x64/lib/node_modules/npm/bin/npm-cli.js /usr/local/bin/npm \
&& ln -s /home/tools/node-v14.15.4-linux-x64/lib/node_modules/npm/bin/npx-cli.js /usr/local/bin/npx \
&& tar -xJf /home/tools/qemu-5.2.0.tar.xz -C /home/tools \
&& sed -i '$aexport PATH=/home/tools/llvm/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/hc-gen:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/gcc_riscv32/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/ninja:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/node-v14.15.4-linux-x64/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/gn:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/root/.local/bin:$PATH' /root/.bashrc \
&& export PATH=/home/tools/llvm/bin:$PATH \
&& export PATH=/home/tools/hc-gen:$PATH \
&& export PATH=/home/tools/gcc_riscv32/bin:$PATH \
&& export PATH=/home/tools/ninja:$PATH \
&& export PATH=/home/tools/node-v12.20.0-linux-x64/bin:$PATH \
&& export PATH=/home/tools/gn:$PATH \
&& export PATH=/root/.local/bin:$PATH \
&& cd /home/tools/qemu-5.2.0 \
&& mkdir build \
&& cd build \
&& ../configure --target-list=arm-softmmu \
&& make -j \
&& make install \
&& cd /home/openharmony \
&& rm -rf /home/tools/*.tar \
&& rm -rf /home/tools/*.gz \
&& rm -rf /home/tools/*.xz \
&& rm -rf /home/tools/qemu-5.2.0 \
&& npm install -g @ohos/hpm-cli --registry https://mirrors.huaweicloud.com/repository/npm/
ENV LANG=en_US.UTF-8 LANGUAGE=en_US.UTF-8 LC_ALL=en_US.UTF-8
|
yinlin/docs
|
docker/Dockerfile
|
Dockerfile
|
public-domain
| 5,461
|
# Copyright (c) 2020 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ubuntu:18.04
WORKDIR /home/openharmony
RUN sed -i "s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list \
&& sed -i "s@http://.*security.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list \
&& apt-get update -y \
&& apt-get install -y apt-utils binutils bison flex bc build-essential make mtd-utils gcc-arm-linux-gnueabi u-boot-tools python3.8 python3-pip git zip unzip curl wget gcc g++ ruby=1:2.5.1 dosfstools mtools default-jre default-jdk scons python3.8-distutils perl openssl libssl-dev cpio git-lfs m4 ccache zlib1g-dev tar rsync liblz4-tool genext2fs binutils-dev device-tree-compiler e2fsprogs git-core gnupg gnutls-bin gperf lib32ncurses5-dev libffi-dev zlib* libelf-dev libx11-dev libgl1-mesa-dev lib32z1-dev xsltproc x11proto-core-dev libc6-dev-i386 libxml2-dev lib32z-dev libdwarf-dev \
&& apt-get install -y grsync xxd libglib2.0-dev libpixman-1-dev kmod jfsutils reiserfsprogs xfsprogs squashfs-tools pcmciautils quota ppp libtinfo-dev libtinfo5 libncurses5 libncurses5-dev libncursesw5 libstdc++6 python2.7 gcc-arm-none-eabi \
&& apt-get install -y vim ssh locales \
&& apt-get install -y doxygen \
&& locale-gen "en_US.UTF-8" \
&& rm -rf /bin/sh /usr/bin/python /usr/bin/python3 /usr/bin/python3m \
&& ln -s /bin/bash /bin/sh \
&& ln -s /usr/bin/python3.8 /usr/bin/python3 \
&& ln -s /usr/bin/python3.8 /usr/bin/python3m \
&& ln -s /usr/bin/python3.8 /usr/bin/python \
&& curl https://gitee.com/oschina/repo/raw/fork_flow/repo-py3 > /usr/bin/repo \
&& chmod +x /usr/bin/repo \
&& pip3 install --trusted-host https://repo.huaweicloud.com -i https://repo.huaweicloud.com/repository/pypi/simple requests setuptools pymongo kconfiglib pycryptodome ecdsa ohos-build pyyaml prompt_toolkit==1.0.14 redis json2html yagmail python-jenkins \
&& pip3 install esdk-obs-python --trusted-host pypi.org \
&& pip3 install six --upgrade --ignore-installed six \
&& mkdir -p /home/tools \
&& mkdir -p /home/tools/gn \
&& wget -P /home/tools https://repo.huaweicloud.com/openharmony/compiler/clang/12.0.1-530132/linux/clang-530132-linux-x86_64.tar.bz2 \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/hc-gen/0.65/linux/hc-gen-0.65-linux.tar \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/gcc_riscv32/7.3.0/linux/gcc_riscv32-linux-7.3.0.tar.gz \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/ninja/1.9.0/linux/ninja.1.9.0.tar \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/gn/1717/linux/gn-linux-x86-1717.tar.gz \
&& wget -P /home/tools https://mirrors.huaweicloud.com/nodejs/v14.15.4/node-v14.15.4-linux-x64.tar.xz \
&& wget -P /home/tools https://hm-verify.obs.cn-north-4.myhuaweicloud.com/qemu-5.2.0.tar.xz \
&& tar -jxvf /home/tools/clang-530132-linux-x86_64.tar.bz2 -C /home/tools \
&& mv /home/tools/clang-530132 /home/tools/llvm \
&& tar -xvf /home/tools/hc-gen-0.65-linux.tar -C /home/tools \
&& tar -xvf /home/tools/gcc_riscv32-linux-7.3.0.tar.gz -C /home/tools \
&& tar -xvf /home/tools/ninja.1.9.0.tar -C /home/tools \
&& tar -xvf /home/tools/gn-linux-x86-1717.tar.gz -C /home/tools/gn \
&& tar -xJf /home/tools/node-v14.15.4-linux-x64.tar.xz -C /home/tools \
&& cp /home/tools/node-v14.15.4-linux-x64/bin/node /usr/local/bin \
&& ln -s /home/tools/node-v14.15.4-linux-x64/lib/node_modules/npm/bin/npm-cli.js /usr/local/bin/npm \
&& ln -s /home/tools/node-v14.15.4-linux-x64/lib/node_modules/npm/bin/npx-cli.js /usr/local/bin/npx \
&& tar -xJf /home/tools/qemu-5.2.0.tar.xz -C /home/tools \
&& sed -i '$aexport PATH=/home/tools/llvm/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/hc-gen:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/gcc_riscv32/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/ninja:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/node-v14.15.4-linux-x64/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/gn:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/root/.local/bin:$PATH' /root/.bashrc \
&& export PATH=/home/tools/llvm/bin:$PATH \
&& export PATH=/home/tools/hc-gen:$PATH \
&& export PATH=/home/tools/gcc_riscv32/bin:$PATH \
&& export PATH=/home/tools/ninja:$PATH \
&& export PATH=/home/tools/node-v12.20.0-linux-x64/bin:$PATH \
&& export PATH=/home/tools/gn:$PATH \
&& export PATH=/root/.local/bin:$PATH \
&& cd /home/tools/qemu-5.2.0 \
&& mkdir build \
&& cd build \
&& ../configure --target-list=arm-softmmu \
&& make -j \
&& make install \
&& cd /home/openharmony \
&& rm -rf /home/tools/*.tar \
&& rm -rf /home/tools/*.gz \
&& rm -rf /home/tools/*.xz \
&& rm -rf /home/tools/qemu-5.2.0 \
&& npm install -g @ohos/hpm-cli --registry https://mirrors.huaweicloud.com/repository/npm/
ENV LANG=en_US.UTF-8 LANGUAGE=en_US.UTF-8 LC_ALL=en_US.UTF-8
|
zgh8848/docs
|
docker/Dockerfile
|
Dockerfile
|
public-domain
| 5,461
|
# Copyright (c) 2020 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ubuntu:18.04
WORKDIR /home/openharmony
RUN sed -i "s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list \
&& sed -i "s@http://.*security.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list \
&& apt-get update -y \
&& apt-get install -y apt-utils binutils bison flex bc build-essential make mtd-utils gcc-arm-linux-gnueabi u-boot-tools python3.8 python3-pip git zip unzip curl wget gcc g++ ruby=1:2.5.1 dosfstools mtools default-jre default-jdk scons python3.8-distutils perl openssl libssl-dev cpio git-lfs m4 ccache zlib1g-dev tar rsync liblz4-tool genext2fs binutils-dev device-tree-compiler e2fsprogs git-core gnupg gnutls-bin gperf lib32ncurses5-dev libffi-dev zlib* libelf-dev libx11-dev libgl1-mesa-dev lib32z1-dev xsltproc x11proto-core-dev libc6-dev-i386 libxml2-dev lib32z-dev libdwarf-dev \
&& apt-get install -y grsync xxd libglib2.0-dev libpixman-1-dev kmod jfsutils reiserfsprogs xfsprogs squashfs-tools pcmciautils quota ppp libtinfo-dev libtinfo5 libncurses5 libncurses5-dev libncursesw5 libstdc++6 python2.7 gcc-arm-none-eabi \
&& apt-get install -y vim ssh locales \
&& apt-get install -y doxygen \
&& locale-gen "en_US.UTF-8" \
&& rm -rf /bin/sh /usr/bin/python /usr/bin/python3 /usr/bin/python3m \
&& ln -s /bin/bash /bin/sh \
&& ln -s /usr/bin/python3.8 /usr/bin/python3 \
&& ln -s /usr/bin/python3.8 /usr/bin/python3m \
&& ln -s /usr/bin/python3.8 /usr/bin/python \
&& curl https://gitee.com/oschina/repo/raw/fork_flow/repo-py3 > /usr/bin/repo \
&& chmod +x /usr/bin/repo \
&& pip3 install --trusted-host https://repo.huaweicloud.com -i https://repo.huaweicloud.com/repository/pypi/simple requests setuptools pymongo kconfiglib pycryptodome ecdsa ohos-build pyyaml prompt_toolkit==1.0.14 redis json2html yagmail python-jenkins \
&& pip3 install esdk-obs-python --trusted-host pypi.org \
&& pip3 install six --upgrade --ignore-installed six \
&& mkdir -p /home/tools \
&& mkdir -p /home/tools/gn \
&& wget -P /home/tools https://repo.huaweicloud.com/openharmony/compiler/clang/12.0.1-530132/linux/clang-530132-linux-x86_64.tar.bz2 \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/hc-gen/0.65/linux/hc-gen-0.65-linux.tar \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/gcc_riscv32/7.3.0/linux/gcc_riscv32-linux-7.3.0.tar.gz \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/ninja/1.9.0/linux/ninja.1.9.0.tar \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/gn/1717/linux/gn-linux-x86-1717.tar.gz \
&& wget -P /home/tools https://mirrors.huaweicloud.com/nodejs/v14.15.4/node-v14.15.4-linux-x64.tar.xz \
&& wget -P /home/tools https://hm-verify.obs.cn-north-4.myhuaweicloud.com/qemu-5.2.0.tar.xz \
&& tar -jxvf /home/tools/clang-530132-linux-x86_64.tar.bz2 -C /home/tools \
&& mv /home/tools/clang-530132 /home/tools/llvm \
&& tar -xvf /home/tools/hc-gen-0.65-linux.tar -C /home/tools \
&& tar -xvf /home/tools/gcc_riscv32-linux-7.3.0.tar.gz -C /home/tools \
&& tar -xvf /home/tools/ninja.1.9.0.tar -C /home/tools \
&& tar -xvf /home/tools/gn-linux-x86-1717.tar.gz -C /home/tools/gn \
&& tar -xJf /home/tools/node-v14.15.4-linux-x64.tar.xz -C /home/tools \
&& cp /home/tools/node-v14.15.4-linux-x64/bin/node /usr/local/bin \
&& ln -s /home/tools/node-v14.15.4-linux-x64/lib/node_modules/npm/bin/npm-cli.js /usr/local/bin/npm \
&& ln -s /home/tools/node-v14.15.4-linux-x64/lib/node_modules/npm/bin/npx-cli.js /usr/local/bin/npx \
&& tar -xJf /home/tools/qemu-5.2.0.tar.xz -C /home/tools \
&& sed -i '$aexport PATH=/home/tools/llvm/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/hc-gen:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/gcc_riscv32/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/ninja:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/node-v14.15.4-linux-x64/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/gn:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/root/.local/bin:$PATH' /root/.bashrc \
&& export PATH=/home/tools/llvm/bin:$PATH \
&& export PATH=/home/tools/hc-gen:$PATH \
&& export PATH=/home/tools/gcc_riscv32/bin:$PATH \
&& export PATH=/home/tools/ninja:$PATH \
&& export PATH=/home/tools/node-v14.15.4-linux-x64/bin:$PATH \
&& export PATH=/home/tools/gn:$PATH \
&& export PATH=/root/.local/bin:$PATH \
&& cd /home/tools/qemu-5.2.0 \
&& mkdir build \
&& cd build \
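# --target-list=arm-softmmu below restricts the QEMU build to the ARM
# system-mode emulator (qemu-system-arm), presumably to emulate the ARM
# boards OpenHarmony targets while keeping build time and image size down.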
&& ../configure --target-list=arm-softmmu \
&& make -j \
&& make install \
&& cd /home/openharmony \
&& rm -rf /home/tools/*.tar \
&& rm -rf /home/tools/*.gz \
&& rm -rf /home/tools/*.xz \
&& rm -rf /home/tools/*.bz2 \
&& rm -rf /home/tools/qemu-5.2.0 \
&& npm install -g @ohos/hpm-cli --registry https://mirrors.huaweicloud.com/repository/npm/
ENV LANG=en_US.UTF-8 LANGUAGE=en_US.UTF-8 LC_ALL=en_US.UTF-8
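# Usage sketch: assuming this file is saved as "Dockerfile", the image can be
# built and entered roughly as follows ("openharmony-build" is an illustrative
# tag, not one mandated by the file):
#
#   docker build -t openharmony-build .
#   docker run -it -v "$(pwd)":/home/openharmony openharmony-build
#
# The bind mount places a local OpenHarmony source tree at /home/openharmony,
# the image's working directory.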
|
yuzhewang/docs_9845
|
docker/Dockerfile
|
Dockerfile
|
public-domain
| 5,461
|
# Copyright (c) 2020 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ubuntu:18.04
WORKDIR /home/openharmony
RUN sed -i "s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list \
&& sed -i "s@http://.*security.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list \
&& apt-get update -y \
&& apt-get install -y apt-utils binutils bison flex bc build-essential make mtd-utils gcc-arm-linux-gnueabi u-boot-tools python3.8 python3-pip git zip unzip curl wget gcc g++ ruby=1:2.5.1 dosfstools mtools default-jre default-jdk scons python3.8-distutils perl openssl libssl-dev cpio git-lfs m4 ccache zlib1g-dev tar rsync liblz4-tool genext2fs binutils-dev device-tree-compiler e2fsprogs git-core gnupg gnutls-bin gperf lib32ncurses5-dev libffi-dev zlib* libelf-dev libx11-dev libgl1-mesa-dev lib32z1-dev xsltproc x11proto-core-dev libc6-dev-i386 libxml2-dev lib32z-dev libdwarf-dev \
&& apt-get install -y grsync xxd libglib2.0-dev libpixman-1-dev kmod jfsutils reiserfsprogs xfsprogs squashfs-tools pcmciautils quota ppp libtinfo-dev libtinfo5 libncurses5 libncurses5-dev libncursesw5 libstdc++6 python2.7 gcc-arm-none-eabi \
&& apt-get install -y vim ssh locales \
&& apt-get install -y doxygen \
&& locale-gen "en_US.UTF-8" \
&& rm -rf /bin/sh /usr/bin/python /usr/bin/python3 /usr/bin/python3m \
&& ln -s /bin/bash /bin/sh \
&& ln -s /usr/bin/python3.8 /usr/bin/python3 \
&& ln -s /usr/bin/python3.8 /usr/bin/python3m \
&& ln -s /usr/bin/python3.8 /usr/bin/python \
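# The symlinks above make bash the default /bin/sh and Python 3.8 the default
# python/python3, which the OpenHarmony build scripts generally assume.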
&& curl https://gitee.com/oschina/repo/raw/fork_flow/repo-py3 > /usr/bin/repo \
&& chmod +x /usr/bin/repo \
&& pip3 install --trusted-host repo.huaweicloud.com -i https://repo.huaweicloud.com/repository/pypi/simple requests setuptools pymongo kconfiglib pycryptodome ecdsa ohos-build pyyaml prompt_toolkit==1.0.14 redis json2html yagmail python-jenkins \
&& pip3 install esdk-obs-python --trusted-host pypi.org \
&& pip3 install six --upgrade --ignore-installed six \
&& mkdir -p /home/tools \
&& mkdir -p /home/tools/gn \
&& wget -P /home/tools https://repo.huaweicloud.com/openharmony/compiler/clang/12.0.1-530132/linux/clang-530132-linux-x86_64.tar.bz2 \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/hc-gen/0.65/linux/hc-gen-0.65-linux.tar \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/gcc_riscv32/7.3.0/linux/gcc_riscv32-linux-7.3.0.tar.gz \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/ninja/1.9.0/linux/ninja.1.9.0.tar \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/gn/1717/linux/gn-linux-x86-1717.tar.gz \
&& wget -P /home/tools https://mirrors.huaweicloud.com/nodejs/v14.15.4/node-v14.15.4-linux-x64.tar.xz \
&& wget -P /home/tools https://hm-verify.obs.cn-north-4.myhuaweicloud.com/qemu-5.2.0.tar.xz \
&& tar -jxvf /home/tools/clang-530132-linux-x86_64.tar.bz2 -C /home/tools \
&& mv /home/tools/clang-530132 /home/tools/llvm \
&& tar -xvf /home/tools/hc-gen-0.65-linux.tar -C /home/tools \
&& tar -xvf /home/tools/gcc_riscv32-linux-7.3.0.tar.gz -C /home/tools \
&& tar -xvf /home/tools/ninja.1.9.0.tar -C /home/tools \
&& tar -xvf /home/tools/gn-linux-x86-1717.tar.gz -C /home/tools/gn \
&& tar -xJf /home/tools/node-v14.15.4-linux-x64.tar.xz -C /home/tools \
&& cp /home/tools/node-v14.15.4-linux-x64/bin/node /usr/local/bin \
&& ln -s /home/tools/node-v14.15.4-linux-x64/lib/node_modules/npm/bin/npm-cli.js /usr/local/bin/npm \
&& ln -s /home/tools/node-v14.15.4-linux-x64/lib/node_modules/npm/bin/npx-cli.js /usr/local/bin/npx \
&& tar -xJf /home/tools/qemu-5.2.0.tar.xz -C /home/tools \
&& sed -i '$aexport PATH=/home/tools/llvm/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/hc-gen:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/gcc_riscv32/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/ninja:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/node-v14.15.4-linux-x64/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/gn:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/root/.local/bin:$PATH' /root/.bashrc \
&& export PATH=/home/tools/llvm/bin:$PATH \
&& export PATH=/home/tools/hc-gen:$PATH \
&& export PATH=/home/tools/gcc_riscv32/bin:$PATH \
&& export PATH=/home/tools/ninja:$PATH \
&& export PATH=/home/tools/node-v14.15.4-linux-x64/bin:$PATH \
&& export PATH=/home/tools/gn:$PATH \
&& export PATH=/root/.local/bin:$PATH \
&& cd /home/tools/qemu-5.2.0 \
&& mkdir build \
&& cd build \
&& ../configure --target-list=arm-softmmu \
&& make -j \
&& make install \
&& cd /home/openharmony \
&& rm -rf /home/tools/*.tar \
&& rm -rf /home/tools/*.gz \
&& rm -rf /home/tools/*.xz \
&& rm -rf /home/tools/*.bz2 \
&& rm -rf /home/tools/qemu-5.2.0 \
&& npm install -g @ohos/hpm-cli --registry https://mirrors.huaweicloud.com/repository/npm/
ENV LANG=en_US.UTF-8 LANGUAGE=en_US.UTF-8 LC_ALL=en_US.UTF-8
|
zzz701/docs
|
docker/Dockerfile
|
Dockerfile
|
public-domain
| 5,461
|
# Copyright (c) 2020 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ubuntu:18.04
WORKDIR /home/openharmony
RUN sed -i "s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list \
&& sed -i "s@http://.*security.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list \
&& apt-get update -y \
&& apt-get install -y apt-utils binutils bison flex bc build-essential make mtd-utils gcc-arm-linux-gnueabi u-boot-tools python3.8 python3-pip git zip unzip curl wget gcc g++ ruby=1:2.5.1 dosfstools mtools default-jre default-jdk scons python3.8-distutils perl openssl libssl-dev cpio git-lfs m4 ccache zlib1g-dev tar rsync liblz4-tool genext2fs binutils-dev device-tree-compiler e2fsprogs git-core gnupg gnutls-bin gperf lib32ncurses5-dev libffi-dev zlib* libelf-dev libx11-dev libgl1-mesa-dev lib32z1-dev xsltproc x11proto-core-dev libc6-dev-i386 libxml2-dev lib32z-dev libdwarf-dev \
&& apt-get install -y grsync xxd libglib2.0-dev libpixman-1-dev kmod jfsutils reiserfsprogs xfsprogs squashfs-tools pcmciautils quota ppp libtinfo-dev libtinfo5 libncurses5 libncurses5-dev libncursesw5 libstdc++6 python2.7 gcc-arm-none-eabi \
&& apt-get install -y vim ssh locales \
&& apt-get install -y doxygen \
&& locale-gen "en_US.UTF-8" \
&& rm -rf /bin/sh /usr/bin/python /usr/bin/python3 /usr/bin/python3m \
&& ln -s /bin/bash /bin/sh \
&& ln -s /usr/bin/python3.8 /usr/bin/python3 \
&& ln -s /usr/bin/python3.8 /usr/bin/python3m \
&& ln -s /usr/bin/python3.8 /usr/bin/python \
&& curl https://gitee.com/oschina/repo/raw/fork_flow/repo-py3 > /usr/bin/repo \
&& chmod +x /usr/bin/repo \
&& pip3 install --trusted-host repo.huaweicloud.com -i https://repo.huaweicloud.com/repository/pypi/simple requests setuptools pymongo kconfiglib pycryptodome ecdsa ohos-build pyyaml prompt_toolkit==1.0.14 redis json2html yagmail python-jenkins \
&& pip3 install esdk-obs-python --trusted-host pypi.org \
&& pip3 install six --upgrade --ignore-installed six \
&& mkdir -p /home/tools \
&& mkdir -p /home/tools/gn \
&& wget -P /home/tools https://repo.huaweicloud.com/openharmony/compiler/clang/12.0.1-530132/linux/clang-530132-linux-x86_64.tar.bz2 \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/hc-gen/0.65/linux/hc-gen-0.65-linux.tar \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/gcc_riscv32/7.3.0/linux/gcc_riscv32-linux-7.3.0.tar.gz \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/ninja/1.9.0/linux/ninja.1.9.0.tar \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/gn/1717/linux/gn-linux-x86-1717.tar.gz \
&& wget -P /home/tools https://mirrors.huaweicloud.com/nodejs/v14.15.4/node-v14.15.4-linux-x64.tar.xz \
&& wget -P /home/tools https://hm-verify.obs.cn-north-4.myhuaweicloud.com/qemu-5.2.0.tar.xz \
&& tar -jxvf /home/tools/clang-530132-linux-x86_64.tar.bz2 -C /home/tools \
&& mv /home/tools/clang-530132 /home/tools/llvm \
&& tar -xvf /home/tools/hc-gen-0.65-linux.tar -C /home/tools \
&& tar -xvf /home/tools/gcc_riscv32-linux-7.3.0.tar.gz -C /home/tools \
&& tar -xvf /home/tools/ninja.1.9.0.tar -C /home/tools \
&& tar -xvf /home/tools/gn-linux-x86-1717.tar.gz -C /home/tools/gn \
&& tar -xJf /home/tools/node-v14.15.4-linux-x64.tar.xz -C /home/tools \
&& cp /home/tools/node-v14.15.4-linux-x64/bin/node /usr/local/bin \
&& ln -s /home/tools/node-v14.15.4-linux-x64/lib/node_modules/npm/bin/npm-cli.js /usr/local/bin/npm \
&& ln -s /home/tools/node-v14.15.4-linux-x64/lib/node_modules/npm/bin/npx-cli.js /usr/local/bin/npx \
&& tar -xJf /home/tools/qemu-5.2.0.tar.xz -C /home/tools \
&& sed -i '$aexport PATH=/home/tools/llvm/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/hc-gen:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/gcc_riscv32/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/ninja:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/node-v14.15.4-linux-x64/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/gn:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/root/.local/bin:$PATH' /root/.bashrc \
&& export PATH=/home/tools/llvm/bin:$PATH \
&& export PATH=/home/tools/hc-gen:$PATH \
&& export PATH=/home/tools/gcc_riscv32/bin:$PATH \
&& export PATH=/home/tools/ninja:$PATH \
&& export PATH=/home/tools/node-v14.15.4-linux-x64/bin:$PATH \
&& export PATH=/home/tools/gn:$PATH \
&& export PATH=/root/.local/bin:$PATH \
&& cd /home/tools/qemu-5.2.0 \
&& mkdir build \
&& cd build \
&& ../configure --target-list=arm-softmmu \
&& make -j \
&& make install \
&& cd /home/openharmony \
&& rm -rf /home/tools/*.tar \
&& rm -rf /home/tools/*.gz \
&& rm -rf /home/tools/*.xz \
&& rm -rf /home/tools/*.bz2 \
&& rm -rf /home/tools/qemu-5.2.0 \
&& npm install -g @ohos/hpm-cli --registry https://mirrors.huaweicloud.com/repository/npm/
ENV LANG=en_US.UTF-8 LANGUAGE=en_US.UTF-8 LC_ALL=en_US.UTF-8
|
zzc19990127/docs
|
docker/Dockerfile
|
Dockerfile
|
public-domain
| 5,461
|
# Copyright (c) 2020 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ubuntu:18.04
WORKDIR /home/openharmony
RUN sed -i "s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list \
&& sed -i "s@http://.*security.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list \
&& apt-get update -y \
&& apt-get install -y apt-utils binutils bison flex bc build-essential make mtd-utils gcc-arm-linux-gnueabi u-boot-tools python3.8 python3-pip git zip unzip curl wget gcc g++ ruby=1:2.5.1 dosfstools mtools default-jre default-jdk scons python3.8-distutils perl openssl libssl-dev cpio git-lfs m4 ccache zlib1g-dev tar rsync liblz4-tool genext2fs binutils-dev device-tree-compiler e2fsprogs git-core gnupg gnutls-bin gperf lib32ncurses5-dev libffi-dev zlib* libelf-dev libx11-dev libgl1-mesa-dev lib32z1-dev xsltproc x11proto-core-dev libc6-dev-i386 libxml2-dev lib32z-dev libdwarf-dev \
&& apt-get install -y grsync xxd libglib2.0-dev libpixman-1-dev kmod jfsutils reiserfsprogs xfsprogs squashfs-tools pcmciautils quota ppp libtinfo-dev libtinfo5 libncurses5 libncurses5-dev libncursesw5 libstdc++6 python2.7 gcc-arm-none-eabi \
&& apt-get install -y vim ssh locales \
&& apt-get install -y doxygen \
&& locale-gen "en_US.UTF-8" \
&& rm -rf /bin/sh /usr/bin/python /usr/bin/python3 /usr/bin/python3m \
&& ln -s /bin/bash /bin/sh \
&& ln -s /usr/bin/python3.8 /usr/bin/python3 \
&& ln -s /usr/bin/python3.8 /usr/bin/python3m \
&& ln -s /usr/bin/python3.8 /usr/bin/python \
&& curl https://gitee.com/oschina/repo/raw/fork_flow/repo-py3 > /usr/bin/repo \
&& chmod +x /usr/bin/repo \
&& pip3 install --trusted-host repo.huaweicloud.com -i https://repo.huaweicloud.com/repository/pypi/simple requests setuptools pymongo kconfiglib pycryptodome ecdsa ohos-build pyyaml prompt_toolkit==1.0.14 redis json2html yagmail python-jenkins \
&& pip3 install esdk-obs-python --trusted-host pypi.org \
&& pip3 install six --upgrade --ignore-installed six \
&& mkdir -p /home/tools \
&& mkdir -p /home/tools/gn \
&& wget -P /home/tools https://repo.huaweicloud.com/openharmony/compiler/clang/12.0.1-530132/linux/clang-530132-linux-x86_64.tar.bz2 \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/hc-gen/0.65/linux/hc-gen-0.65-linux.tar \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/gcc_riscv32/7.3.0/linux/gcc_riscv32-linux-7.3.0.tar.gz \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/ninja/1.9.0/linux/ninja.1.9.0.tar \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/gn/1717/linux/gn-linux-x86-1717.tar.gz \
&& wget -P /home/tools https://mirrors.huaweicloud.com/nodejs/v14.15.4/node-v14.15.4-linux-x64.tar.xz \
&& wget -P /home/tools https://hm-verify.obs.cn-north-4.myhuaweicloud.com/qemu-5.2.0.tar.xz \
&& tar -jxvf /home/tools/clang-530132-linux-x86_64.tar.bz2 -C /home/tools \
&& mv /home/tools/clang-530132 /home/tools/llvm \
&& tar -xvf /home/tools/hc-gen-0.65-linux.tar -C /home/tools \
&& tar -xvf /home/tools/gcc_riscv32-linux-7.3.0.tar.gz -C /home/tools \
&& tar -xvf /home/tools/ninja.1.9.0.tar -C /home/tools \
&& tar -xvf /home/tools/gn-linux-x86-1717.tar.gz -C /home/tools/gn \
&& tar -xJf /home/tools/node-v14.15.4-linux-x64.tar.xz -C /home/tools \
&& cp /home/tools/node-v14.15.4-linux-x64/bin/node /usr/local/bin \
&& ln -s /home/tools/node-v14.15.4-linux-x64/lib/node_modules/npm/bin/npm-cli.js /usr/local/bin/npm \
&& ln -s /home/tools/node-v14.15.4-linux-x64/lib/node_modules/npm/bin/npx-cli.js /usr/local/bin/npx \
&& tar -xJf /home/tools/qemu-5.2.0.tar.xz -C /home/tools \
&& sed -i '$aexport PATH=/home/tools/llvm/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/hc-gen:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/gcc_riscv32/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/ninja:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/node-v14.15.4-linux-x64/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/gn:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/root/.local/bin:$PATH' /root/.bashrc \
&& export PATH=/home/tools/llvm/bin:$PATH \
&& export PATH=/home/tools/hc-gen:$PATH \
&& export PATH=/home/tools/gcc_riscv32/bin:$PATH \
&& export PATH=/home/tools/ninja:$PATH \
&& export PATH=/home/tools/node-v14.15.4-linux-x64/bin:$PATH \
&& export PATH=/home/tools/gn:$PATH \
&& export PATH=/root/.local/bin:$PATH \
&& cd /home/tools/qemu-5.2.0 \
&& mkdir build \
&& cd build \
&& ../configure --target-list=arm-softmmu \
&& make -j \
&& make install \
&& cd /home/openharmony \
&& rm -rf /home/tools/*.tar \
&& rm -rf /home/tools/*.gz \
&& rm -rf /home/tools/*.xz \
&& rm -rf /home/tools/*.bz2 \
&& rm -rf /home/tools/qemu-5.2.0 \
&& npm install -g @ohos/hpm-cli --registry https://mirrors.huaweicloud.com/repository/npm/
ENV LANG=en_US.UTF-8 LANGUAGE=en_US.UTF-8 LC_ALL=en_US.UTF-8
|
yy822811/docs
|
docker/Dockerfile
|
Dockerfile
|
public-domain
| 5,461
|
# Copyright (c) 2020 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ubuntu:18.04
WORKDIR /home/openharmony
RUN sed -i "s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list \
&& sed -i "s@http://.*security.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list \
&& apt-get update -y \
&& apt-get install -y apt-utils binutils bison flex bc build-essential make mtd-utils gcc-arm-linux-gnueabi u-boot-tools python3.8 python3-pip git zip unzip curl wget gcc g++ ruby=1:2.5.1 dosfstools mtools default-jre default-jdk scons python3.8-distutils perl openssl libssl-dev cpio git-lfs m4 ccache zlib1g-dev tar rsync liblz4-tool genext2fs binutils-dev device-tree-compiler e2fsprogs git-core gnupg gnutls-bin gperf lib32ncurses5-dev libffi-dev zlib* libelf-dev libx11-dev libgl1-mesa-dev lib32z1-dev xsltproc x11proto-core-dev libc6-dev-i386 libxml2-dev lib32z-dev libdwarf-dev \
&& apt-get install -y grsync xxd libglib2.0-dev libpixman-1-dev kmod jfsutils reiserfsprogs xfsprogs squashfs-tools pcmciautils quota ppp libtinfo-dev libtinfo5 libncurses5 libncurses5-dev libncursesw5 libstdc++6 python2.7 gcc-arm-none-eabi \
&& apt-get install -y vim ssh locales \
&& apt-get install -y doxygen \
&& locale-gen "en_US.UTF-8" \
&& rm -rf /bin/sh /usr/bin/python /usr/bin/python3 /usr/bin/python3m \
&& ln -s /bin/bash /bin/sh \
&& ln -s /usr/bin/python3.8 /usr/bin/python3 \
&& ln -s /usr/bin/python3.8 /usr/bin/python3m \
&& ln -s /usr/bin/python3.8 /usr/bin/python \
&& curl https://gitee.com/oschina/repo/raw/fork_flow/repo-py3 > /usr/bin/repo \
&& chmod +x /usr/bin/repo \
&& pip3 install --trusted-host repo.huaweicloud.com -i https://repo.huaweicloud.com/repository/pypi/simple requests setuptools pymongo kconfiglib pycryptodome ecdsa ohos-build pyyaml prompt_toolkit==1.0.14 redis json2html yagmail python-jenkins \
&& pip3 install esdk-obs-python --trusted-host pypi.org \
&& pip3 install six --upgrade --ignore-installed six \
&& mkdir -p /home/tools \
&& mkdir -p /home/tools/gn \
&& wget -P /home/tools https://repo.huaweicloud.com/openharmony/compiler/clang/12.0.1-530132/linux/clang-530132-linux-x86_64.tar.bz2 \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/hc-gen/0.65/linux/hc-gen-0.65-linux.tar \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/gcc_riscv32/7.3.0/linux/gcc_riscv32-linux-7.3.0.tar.gz \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/ninja/1.9.0/linux/ninja.1.9.0.tar \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/gn/1717/linux/gn-linux-x86-1717.tar.gz \
&& wget -P /home/tools https://mirrors.huaweicloud.com/nodejs/v14.15.4/node-v14.15.4-linux-x64.tar.xz \
&& wget -P /home/tools https://hm-verify.obs.cn-north-4.myhuaweicloud.com/qemu-5.2.0.tar.xz \
&& tar -jxvf /home/tools/clang-530132-linux-x86_64.tar.bz2 -C /home/tools \
&& mv /home/tools/clang-530132 /home/tools/llvm \
&& tar -xvf /home/tools/hc-gen-0.65-linux.tar -C /home/tools \
&& tar -xvf /home/tools/gcc_riscv32-linux-7.3.0.tar.gz -C /home/tools \
&& tar -xvf /home/tools/ninja.1.9.0.tar -C /home/tools \
&& tar -xvf /home/tools/gn-linux-x86-1717.tar.gz -C /home/tools/gn \
&& tar -xJf /home/tools/node-v14.15.4-linux-x64.tar.xz -C /home/tools \
&& cp /home/tools/node-v14.15.4-linux-x64/bin/node /usr/local/bin \
&& ln -s /home/tools/node-v14.15.4-linux-x64/lib/node_modules/npm/bin/npm-cli.js /usr/local/bin/npm \
&& ln -s /home/tools/node-v14.15.4-linux-x64/lib/node_modules/npm/bin/npx-cli.js /usr/local/bin/npx \
&& tar -xJf /home/tools/qemu-5.2.0.tar.xz -C /home/tools \
&& sed -i '$aexport PATH=/home/tools/llvm/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/hc-gen:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/gcc_riscv32/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/ninja:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/node-v14.15.4-linux-x64/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/gn:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/root/.local/bin:$PATH' /root/.bashrc \
&& export PATH=/home/tools/llvm/bin:$PATH \
&& export PATH=/home/tools/hc-gen:$PATH \
&& export PATH=/home/tools/gcc_riscv32/bin:$PATH \
&& export PATH=/home/tools/ninja:$PATH \
&& export PATH=/home/tools/node-v14.15.4-linux-x64/bin:$PATH \
&& export PATH=/home/tools/gn:$PATH \
&& export PATH=/root/.local/bin:$PATH \
&& cd /home/tools/qemu-5.2.0 \
&& mkdir build \
&& cd build \
&& ../configure --target-list=arm-softmmu \
&& make -j \
&& make install \
&& cd /home/openharmony \
&& rm -rf /home/tools/*.tar \
&& rm -rf /home/tools/*.gz \
&& rm -rf /home/tools/*.xz \
&& rm -rf /home/tools/*.bz2 \
&& rm -rf /home/tools/qemu-5.2.0 \
&& npm install -g @ohos/hpm-cli --registry https://mirrors.huaweicloud.com/repository/npm/
ENV LANG=en_US.UTF-8 LANGUAGE=en_US.UTF-8 LC_ALL=en_US.UTF-8
|
zhushuanghong/docs
|
docker/Dockerfile
|
Dockerfile
|
public-domain
| 5,461
|
# Copyright (c) 2020 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ubuntu:18.04
WORKDIR /home/openharmony
RUN sed -i "s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list \
&& sed -i "s@http://.*security.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list \
&& apt-get update -y \
&& apt-get install -y apt-utils binutils bison flex bc build-essential make mtd-utils gcc-arm-linux-gnueabi u-boot-tools python3.8 python3-pip git zip unzip curl wget gcc g++ ruby=1:2.5.1 dosfstools mtools default-jre default-jdk scons python3.8-distutils perl openssl libssl-dev cpio git-lfs m4 ccache zlib1g-dev tar rsync liblz4-tool genext2fs binutils-dev device-tree-compiler e2fsprogs git-core gnupg gnutls-bin gperf lib32ncurses5-dev libffi-dev zlib* libelf-dev libx11-dev libgl1-mesa-dev lib32z1-dev xsltproc x11proto-core-dev libc6-dev-i386 libxml2-dev lib32z-dev libdwarf-dev \
&& apt-get install -y grsync xxd libglib2.0-dev libpixman-1-dev kmod jfsutils reiserfsprogs xfsprogs squashfs-tools pcmciautils quota ppp libtinfo-dev libtinfo5 libncurses5 libncurses5-dev libncursesw5 libstdc++6 python2.7 gcc-arm-none-eabi \
&& apt-get install -y vim ssh locales \
&& apt-get install -y doxygen \
&& locale-gen "en_US.UTF-8" \
&& rm -rf /bin/sh /usr/bin/python /usr/bin/python3 /usr/bin/python3m \
&& ln -s /bin/bash /bin/sh \
&& ln -s /usr/bin/python3.8 /usr/bin/python3 \
&& ln -s /usr/bin/python3.8 /usr/bin/python3m \
&& ln -s /usr/bin/python3.8 /usr/bin/python \
&& curl https://gitee.com/oschina/repo/raw/fork_flow/repo-py3 > /usr/bin/repo \
&& chmod +x /usr/bin/repo \
&& pip3 install --trusted-host repo.huaweicloud.com -i https://repo.huaweicloud.com/repository/pypi/simple requests setuptools pymongo kconfiglib pycryptodome ecdsa ohos-build pyyaml prompt_toolkit==1.0.14 redis json2html yagmail python-jenkins \
&& pip3 install esdk-obs-python --trusted-host pypi.org \
&& pip3 install six --upgrade --ignore-installed six \
&& mkdir -p /home/tools \
&& mkdir -p /home/tools/gn \
&& wget -P /home/tools https://repo.huaweicloud.com/openharmony/compiler/clang/12.0.1-530132/linux/clang-530132-linux-x86_64.tar.bz2 \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/hc-gen/0.65/linux/hc-gen-0.65-linux.tar \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/gcc_riscv32/7.3.0/linux/gcc_riscv32-linux-7.3.0.tar.gz \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/ninja/1.9.0/linux/ninja.1.9.0.tar \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/gn/1717/linux/gn-linux-x86-1717.tar.gz \
&& wget -P /home/tools https://mirrors.huaweicloud.com/nodejs/v14.15.4/node-v14.15.4-linux-x64.tar.xz \
&& wget -P /home/tools https://hm-verify.obs.cn-north-4.myhuaweicloud.com/qemu-5.2.0.tar.xz \
&& tar -jxvf /home/tools/clang-530132-linux-x86_64.tar.bz2 -C /home/tools \
&& mv /home/tools/clang-530132 /home/tools/llvm \
&& tar -xvf /home/tools/hc-gen-0.65-linux.tar -C /home/tools \
&& tar -xvf /home/tools/gcc_riscv32-linux-7.3.0.tar.gz -C /home/tools \
&& tar -xvf /home/tools/ninja.1.9.0.tar -C /home/tools \
&& tar -xvf /home/tools/gn-linux-x86-1717.tar.gz -C /home/tools/gn \
&& tar -xJf /home/tools/node-v14.15.4-linux-x64.tar.xz -C /home/tools \
&& cp /home/tools/node-v14.15.4-linux-x64/bin/node /usr/local/bin \
&& ln -s /home/tools/node-v14.15.4-linux-x64/lib/node_modules/npm/bin/npm-cli.js /usr/local/bin/npm \
&& ln -s /home/tools/node-v14.15.4-linux-x64/lib/node_modules/npm/bin/npx-cli.js /usr/local/bin/npx \
&& tar -xJf /home/tools/qemu-5.2.0.tar.xz -C /home/tools \
&& sed -i '$aexport PATH=/home/tools/llvm/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/hc-gen:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/gcc_riscv32/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/ninja:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/node-v14.15.4-linux-x64/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/gn:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/root/.local/bin:$PATH' /root/.bashrc \
&& export PATH=/home/tools/llvm/bin:$PATH \
&& export PATH=/home/tools/hc-gen:$PATH \
&& export PATH=/home/tools/gcc_riscv32/bin:$PATH \
&& export PATH=/home/tools/ninja:$PATH \
&& export PATH=/home/tools/node-v14.15.4-linux-x64/bin:$PATH \
&& export PATH=/home/tools/gn:$PATH \
&& export PATH=/root/.local/bin:$PATH \
&& cd /home/tools/qemu-5.2.0 \
&& mkdir build \
&& cd build \
&& ../configure --target-list=arm-softmmu \
&& make -j \
&& make install \
&& cd /home/openharmony \
&& rm -rf /home/tools/*.tar \
&& rm -rf /home/tools/*.gz \
&& rm -rf /home/tools/*.xz \
&& rm -rf /home/tools/*.bz2 \
&& rm -rf /home/tools/qemu-5.2.0 \
&& npm install -g @ohos/hpm-cli --registry https://mirrors.huaweicloud.com/repository/npm/
ENV LANG=en_US.UTF-8 LANGUAGE=en_US.UTF-8 LC_ALL=en_US.UTF-8
|
zengyawen/docs
|
docker/Dockerfile
|
Dockerfile
|
public-domain
| 5,461
|
# Copyright (c) 2020 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ubuntu:18.04
WORKDIR /home/openharmony
RUN sed -i "s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list \
&& sed -i "s@http://.*security.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list \
&& apt-get update -y \
&& apt-get install -y apt-utils binutils bison flex bc build-essential make mtd-utils gcc-arm-linux-gnueabi u-boot-tools python3.8 python3-pip git zip unzip curl wget gcc g++ ruby=1:2.5.1 dosfstools mtools default-jre default-jdk scons python3.8-distutils perl openssl libssl-dev cpio git-lfs m4 ccache zlib1g-dev tar rsync liblz4-tool genext2fs binutils-dev device-tree-compiler e2fsprogs git-core gnupg gnutls-bin gperf lib32ncurses5-dev libffi-dev zlib* libelf-dev libx11-dev libgl1-mesa-dev lib32z1-dev xsltproc x11proto-core-dev libc6-dev-i386 libxml2-dev lib32z-dev libdwarf-dev \
&& apt-get install -y grsync xxd libglib2.0-dev libpixman-1-dev kmod jfsutils reiserfsprogs xfsprogs squashfs-tools pcmciautils quota ppp libtinfo-dev libtinfo5 libncurses5 libncurses5-dev libncursesw5 libstdc++6 python2.7 gcc-arm-none-eabi \
&& apt-get install -y vim ssh locales \
&& apt-get install -y doxygen \
&& locale-gen "en_US.UTF-8" \
&& rm -rf /bin/sh /usr/bin/python /usr/bin/python3 /usr/bin/python3m \
&& ln -s /bin/bash /bin/sh \
&& ln -s /usr/bin/python3.8 /usr/bin/python3 \
&& ln -s /usr/bin/python3.8 /usr/bin/python3m \
&& ln -s /usr/bin/python3.8 /usr/bin/python \
&& curl https://gitee.com/oschina/repo/raw/fork_flow/repo-py3 > /usr/bin/repo \
&& chmod +x /usr/bin/repo \
&& pip3 install --trusted-host repo.huaweicloud.com -i https://repo.huaweicloud.com/repository/pypi/simple requests setuptools pymongo kconfiglib pycryptodome ecdsa ohos-build pyyaml prompt_toolkit==1.0.14 redis json2html yagmail python-jenkins \
&& pip3 install esdk-obs-python --trusted-host pypi.org \
&& pip3 install six --upgrade --ignore-installed six \
&& mkdir -p /home/tools \
&& mkdir -p /home/tools/gn \
&& wget -P /home/tools https://repo.huaweicloud.com/openharmony/compiler/clang/12.0.1-530132/linux/clang-530132-linux-x86_64.tar.bz2 \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/hc-gen/0.65/linux/hc-gen-0.65-linux.tar \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/gcc_riscv32/7.3.0/linux/gcc_riscv32-linux-7.3.0.tar.gz \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/ninja/1.9.0/linux/ninja.1.9.0.tar \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/gn/1717/linux/gn-linux-x86-1717.tar.gz \
&& wget -P /home/tools https://mirrors.huaweicloud.com/nodejs/v14.15.4/node-v14.15.4-linux-x64.tar.xz \
&& wget -P /home/tools https://hm-verify.obs.cn-north-4.myhuaweicloud.com/qemu-5.2.0.tar.xz \
&& tar -jxvf /home/tools/clang-530132-linux-x86_64.tar.bz2 -C /home/tools \
&& mv /home/tools/clang-530132 /home/tools/llvm \
&& tar -xvf /home/tools/hc-gen-0.65-linux.tar -C /home/tools \
&& tar -xvf /home/tools/gcc_riscv32-linux-7.3.0.tar.gz -C /home/tools \
&& tar -xvf /home/tools/ninja.1.9.0.tar -C /home/tools \
&& tar -xvf /home/tools/gn-linux-x86-1717.tar.gz -C /home/tools/gn \
&& tar -xJf /home/tools/node-v14.15.4-linux-x64.tar.xz -C /home/tools \
&& cp /home/tools/node-v14.15.4-linux-x64/bin/node /usr/local/bin \
&& ln -s /home/tools/node-v14.15.4-linux-x64/lib/node_modules/npm/bin/npm-cli.js /usr/local/bin/npm \
&& ln -s /home/tools/node-v14.15.4-linux-x64/lib/node_modules/npm/bin/npx-cli.js /usr/local/bin/npx \
&& tar -xJf /home/tools/qemu-5.2.0.tar.xz -C /home/tools \
&& sed -i '$aexport PATH=/home/tools/llvm/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/hc-gen:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/gcc_riscv32/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/ninja:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/node-v14.15.4-linux-x64/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/gn:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/root/.local/bin:$PATH' /root/.bashrc \
&& export PATH=/home/tools/llvm/bin:$PATH \
&& export PATH=/home/tools/hc-gen:$PATH \
&& export PATH=/home/tools/gcc_riscv32/bin:$PATH \
&& export PATH=/home/tools/ninja:$PATH \
&& export PATH=/home/tools/node-v14.15.4-linux-x64/bin:$PATH \
&& export PATH=/home/tools/gn:$PATH \
&& export PATH=/root/.local/bin:$PATH \
&& cd /home/tools/qemu-5.2.0 \
&& mkdir build \
&& cd build \
&& ../configure --target-list=arm-softmmu \
&& make -j \
&& make install \
&& cd /home/openharmony \
&& rm -rf /home/tools/*.tar \
&& rm -rf /home/tools/*.gz \
&& rm -rf /home/tools/*.xz \
&& rm -rf /home/tools/*.bz2 \
&& rm -rf /home/tools/qemu-5.2.0 \
&& npm install -g @ohos/hpm-cli --registry https://mirrors.huaweicloud.com/repository/npm/
ENV LANG=en_US.UTF-8 LANGUAGE=en_US.UTF-8 LC_ALL=en_US.UTF-8
|
zzz701/docs_6712
|
docker/Dockerfile
|
Dockerfile
|
public-domain
| 5,461
|
# Copyright (c) 2020 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ubuntu:18.04
WORKDIR /home/openharmony
RUN sed -i "s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list \
&& sed -i "s@http://.*security.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list \
&& apt-get update -y \
&& apt-get install -y apt-utils binutils bison flex bc build-essential make mtd-utils gcc-arm-linux-gnueabi u-boot-tools python3.8 python3-pip git zip unzip curl wget gcc g++ ruby=1:2.5.1 dosfstools mtools default-jre default-jdk scons python3.8-distutils perl openssl libssl-dev cpio git-lfs m4 ccache zlib1g-dev tar rsync liblz4-tool genext2fs binutils-dev device-tree-compiler e2fsprogs git-core gnupg gnutls-bin gperf lib32ncurses5-dev libffi-dev zlib* libelf-dev libx11-dev libgl1-mesa-dev lib32z1-dev xsltproc x11proto-core-dev libc6-dev-i386 libxml2-dev lib32z-dev libdwarf-dev \
&& apt-get install -y grsync xxd libglib2.0-dev libpixman-1-dev kmod jfsutils reiserfsprogs xfsprogs squashfs-tools pcmciautils quota ppp libtinfo-dev libtinfo5 libncurses5 libncurses5-dev libncursesw5 libstdc++6 python2.7 gcc-arm-none-eabi \
&& apt-get install -y vim ssh locales \
&& apt-get install -y doxygen \
&& locale-gen "en_US.UTF-8" \
&& rm -rf /bin/sh /usr/bin/python /usr/bin/python3 /usr/bin/python3m \
&& ln -s /bin/bash /bin/sh \
&& ln -s /usr/bin/python3.8 /usr/bin/python3 \
&& ln -s /usr/bin/python3.8 /usr/bin/python3m \
&& ln -s /usr/bin/python3.8 /usr/bin/python \
&& curl https://gitee.com/oschina/repo/raw/fork_flow/repo-py3 > /usr/bin/repo \
&& chmod +x /usr/bin/repo \
&& pip3 install --trusted-host repo.huaweicloud.com -i https://repo.huaweicloud.com/repository/pypi/simple requests setuptools pymongo kconfiglib pycryptodome ecdsa ohos-build pyyaml prompt_toolkit==1.0.14 redis json2html yagmail python-jenkins \
&& pip3 install esdk-obs-python --trusted-host pypi.org \
&& pip3 install six --upgrade --ignore-installed six \
&& mkdir -p /home/tools \
&& mkdir -p /home/tools/gn \
&& wget -P /home/tools https://repo.huaweicloud.com/openharmony/compiler/clang/12.0.1-530132/linux/clang-530132-linux-x86_64.tar.bz2 \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/hc-gen/0.65/linux/hc-gen-0.65-linux.tar \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/gcc_riscv32/7.3.0/linux/gcc_riscv32-linux-7.3.0.tar.gz \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/ninja/1.9.0/linux/ninja.1.9.0.tar \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/gn/1717/linux/gn-linux-x86-1717.tar.gz \
&& wget -P /home/tools https://mirrors.huaweicloud.com/nodejs/v14.15.4/node-v14.15.4-linux-x64.tar.xz \
&& wget -P /home/tools https://hm-verify.obs.cn-north-4.myhuaweicloud.com/qemu-5.2.0.tar.xz \
&& tar -jxvf /home/tools/clang-530132-linux-x86_64.tar.bz2 -C /home/tools \
&& mv /home/tools/clang-530132 /home/tools/llvm \
&& tar -xvf /home/tools/hc-gen-0.65-linux.tar -C /home/tools \
&& tar -xvf /home/tools/gcc_riscv32-linux-7.3.0.tar.gz -C /home/tools \
&& tar -xvf /home/tools/ninja.1.9.0.tar -C /home/tools \
&& tar -xvf /home/tools/gn-linux-x86-1717.tar.gz -C /home/tools/gn \
&& tar -xJf /home/tools/node-v14.15.4-linux-x64.tar.xz -C /home/tools \
&& cp /home/tools/node-v14.15.4-linux-x64/bin/node /usr/local/bin \
&& ln -s /home/tools/node-v14.15.4-linux-x64/lib/node_modules/npm/bin/npm-cli.js /usr/local/bin/npm \
&& ln -s /home/tools/node-v14.15.4-linux-x64/lib/node_modules/npm/bin/npx-cli.js /usr/local/bin/npx \
&& tar -xJf /home/tools/qemu-5.2.0.tar.xz -C /home/tools \
&& sed -i '$aexport PATH=/home/tools/llvm/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/hc-gen:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/gcc_riscv32/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/ninja:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/node-v14.15.4-linux-x64/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/gn:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/root/.local/bin:$PATH' /root/.bashrc \
&& export PATH=/home/tools/llvm/bin:$PATH \
&& export PATH=/home/tools/hc-gen:$PATH \
&& export PATH=/home/tools/gcc_riscv32/bin:$PATH \
&& export PATH=/home/tools/ninja:$PATH \
&& export PATH=/home/tools/node-v14.15.4-linux-x64/bin:$PATH \
&& export PATH=/home/tools/gn:$PATH \
&& export PATH=/root/.local/bin:$PATH \
&& cd /home/tools/qemu-5.2.0 \
&& mkdir build \
&& cd build \
&& ../configure --target-list=arm-softmmu \
&& make -j \
&& make install \
&& cd /home/openharmony \
&& rm -rf /home/tools/*.tar \
&& rm -rf /home/tools/*.gz \
&& rm -rf /home/tools/*.xz \
&& rm -rf /home/tools/*.bz2 \
&& rm -rf /home/tools/qemu-5.2.0 \
&& npm install -g @ohos/hpm-cli --registry https://mirrors.huaweicloud.com/repository/npm/
ENV LANG=en_US.UTF-8 LANGUAGE=en_US.UTF-8 LC_ALL=en_US.UTF-8
|
zhushuanghong/docs_1
|
docker/Dockerfile
|
Dockerfile
|
public-domain
| 5,461
|
# Copyright (c) 2020 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ubuntu:18.04
WORKDIR /home/openharmony
RUN sed -i "s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list \
&& sed -i "s@http://.*security.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list \
&& apt-get update -y \
&& apt-get install -y apt-utils binutils bison flex bc build-essential make mtd-utils gcc-arm-linux-gnueabi u-boot-tools python3.8 python3-pip git zip unzip curl wget gcc g++ ruby=1:2.5.1 dosfstools mtools default-jre default-jdk scons python3.8-distutils perl openssl libssl-dev cpio git-lfs m4 ccache zlib1g-dev tar rsync liblz4-tool genext2fs binutils-dev device-tree-compiler e2fsprogs git-core gnupg gnutls-bin gperf lib32ncurses5-dev libffi-dev zlib* libelf-dev libx11-dev libgl1-mesa-dev lib32z1-dev xsltproc x11proto-core-dev libc6-dev-i386 libxml2-dev lib32z-dev libdwarf-dev \
&& apt-get install -y grsync xxd libglib2.0-dev libpixman-1-dev kmod jfsutils reiserfsprogs xfsprogs squashfs-tools pcmciautils quota ppp libtinfo-dev libtinfo5 libncurses5 libncurses5-dev libncursesw5 libstdc++6 python2.7 gcc-arm-none-eabi \
&& apt-get install -y vim ssh locales \
&& apt-get install -y doxygen \
&& locale-gen "en_US.UTF-8" \
&& rm -rf /bin/sh /usr/bin/python /usr/bin/python3 /usr/bin/python3m \
&& ln -s /bin/bash /bin/sh \
&& ln -s /usr/bin/python3.8 /usr/bin/python3 \
&& ln -s /usr/bin/python3.8 /usr/bin/python3m \
&& ln -s /usr/bin/python3.8 /usr/bin/python \
&& curl https://gitee.com/oschina/repo/raw/fork_flow/repo-py3 > /usr/bin/repo \
&& chmod +x /usr/bin/repo \
&& pip3 install --trusted-host repo.huaweicloud.com -i https://repo.huaweicloud.com/repository/pypi/simple requests setuptools pymongo kconfiglib pycryptodome ecdsa ohos-build pyyaml prompt_toolkit==1.0.14 redis json2html yagmail python-jenkins \
&& pip3 install esdk-obs-python --trusted-host pypi.org \
&& pip3 install six --upgrade --ignore-installed six \
&& mkdir -p /home/tools \
&& mkdir -p /home/tools/gn \
&& wget -P /home/tools https://repo.huaweicloud.com/openharmony/compiler/clang/12.0.1-530132/linux/clang-530132-linux-x86_64.tar.bz2 \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/hc-gen/0.65/linux/hc-gen-0.65-linux.tar \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/gcc_riscv32/7.3.0/linux/gcc_riscv32-linux-7.3.0.tar.gz \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/ninja/1.9.0/linux/ninja.1.9.0.tar \
&& wget -P /home/tools https://repo.huaweicloud.com/harmonyos/compiler/gn/1717/linux/gn-linux-x86-1717.tar.gz \
&& wget -P /home/tools https://mirrors.huaweicloud.com/nodejs/v14.15.4/node-v14.15.4-linux-x64.tar.xz \
&& wget -P /home/tools https://hm-verify.obs.cn-north-4.myhuaweicloud.com/qemu-5.2.0.tar.xz \
&& tar -jxvf /home/tools/clang-530132-linux-x86_64.tar.bz2 -C /home/tools \
&& mv /home/tools/clang-530132 /home/tools/llvm \
&& tar -xvf /home/tools/hc-gen-0.65-linux.tar -C /home/tools \
&& tar -xvf /home/tools/gcc_riscv32-linux-7.3.0.tar.gz -C /home/tools \
&& tar -xvf /home/tools/ninja.1.9.0.tar -C /home/tools \
&& tar -xvf /home/tools/gn-linux-x86-1717.tar.gz -C /home/tools/gn \
&& tar -xJf /home/tools/node-v14.15.4-linux-x64.tar.xz -C /home/tools \
&& cp /home/tools/node-v14.15.4-linux-x64/bin/node /usr/local/bin \
&& ln -s /home/tools/node-v14.15.4-linux-x64/lib/node_modules/npm/bin/npm-cli.js /usr/local/bin/npm \
&& ln -s /home/tools/node-v14.15.4-linux-x64/lib/node_modules/npm/bin/npx-cli.js /usr/local/bin/npx \
&& tar -xJf /home/tools/qemu-5.2.0.tar.xz -C /home/tools \
&& sed -i '$aexport PATH=/home/tools/llvm/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/hc-gen:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/gcc_riscv32/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/ninja:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/node-v14.15.4-linux-x64/bin:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/home/tools/gn:$PATH' /root/.bashrc \
&& sed -i '$aexport PATH=/root/.local/bin:$PATH' /root/.bashrc \
&& export PATH=/home/tools/llvm/bin:$PATH \
&& export PATH=/home/tools/hc-gen:$PATH \
&& export PATH=/home/tools/gcc_riscv32/bin:$PATH \
&& export PATH=/home/tools/ninja:$PATH \
&& export PATH=/home/tools/node-v14.15.4-linux-x64/bin:$PATH \
&& export PATH=/home/tools/gn:$PATH \
&& export PATH=/root/.local/bin:$PATH \
&& cd /home/tools/qemu-5.2.0 \
&& mkdir build \
&& cd build \
&& ../configure --target-list=arm-softmmu \
&& make -j \
&& make install \
&& cd /home/openharmony \
&& rm -rf /home/tools/*.tar \
&& rm -rf /home/tools/*.gz \
&& rm -rf /home/tools/*.xz \
&& rm -rf /home/tools/*.bz2 \
&& rm -rf /home/tools/qemu-5.2.0 \
&& npm install -g @ohos/hpm-cli --registry https://mirrors.huaweicloud.com/repository/npm/
ENV LANG=en_US.UTF-8 LANGUAGE=en_US.UTF-8 LC_ALL=en_US.UTF-8
|
zhushuanghong/docs_2
|
docker/Dockerfile
|
Dockerfile
|
public-domain
| 5,461
|