Added a simple, non-optimized StringTokenizer class for tokenizing strings. Also added a contains(char) function to the String class because it was handy when implementing the StringTokenizer.
svn-id: r30828
parent e3852c92a7
commit b6cad0f0ce
4 changed files with 58 additions and 0 deletions
@@ -96,6 +96,33 @@ bool matchString(const char *str, const char *pat) {
 	}
 }
 
+StringTokenizer::StringTokenizer(const String &str, const String &delimiters) : _str(str), _delimiters(delimiters) {
+	reset();
+}
+
+void StringTokenizer::reset() {
+	_tokenBegin = _tokenEnd = 0;
+}
+
+bool StringTokenizer::empty() const {
+	// Search for the next token's start (i.e. the next non-delimiter character)
+	for (uint i = _tokenEnd; i < _str.size(); i++) {
+		if (!_delimiters.contains(_str[i]))
+			return false; // Found a token so the tokenizer is not empty
+	}
+	// Didn't find any more tokens so the tokenizer is empty
+	return true;
+}
+
+String StringTokenizer::nextToken() {
+	// Seek to next token's start (i.e. jump over the delimiters before next token)
+	for (_tokenBegin = _tokenEnd; _tokenBegin < _str.size() && _delimiters.contains(_str[_tokenBegin]); _tokenBegin++);
+	// Seek to the token's end (i.e. jump over the non-delimiters)
+	for (_tokenEnd = _tokenBegin; _tokenEnd < _str.size() && !_delimiters.contains(_str[_tokenEnd]); _tokenEnd++);
+	// Return the found token
+	return String(_str.c_str() + _tokenBegin, _tokenEnd - _tokenBegin);
+}
+
 //
 // Print hexdump of the data passed in
 //
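For illustration, a minimal usage sketch of the new tokenizer. The Common:: namespace qualification and the debug() call are assumptions about the surrounding ScummVM code rather than part of this commit; only the StringTokenizer interface shown in the diff above is taken from the change itself.

// Illustrative sketch only: split a string on spaces and tabs using the
// StringTokenizer added in this commit. Namespace and debug() are assumed.
Common::String line("this is  a\ttest");
Common::StringTokenizer tok(line, " \t");

while (!tok.empty()) {
	Common::String word = tok.nextToken(); // "this", "is", "a", "test"
	debug("token: '%s'", word.c_str());
}

tok.reset(); // rewind and iterate over the same string again

Note that nextToken() advances the internal position (_tokenBegin/_tokenEnd), so empty() and nextToken() together drive a simple loop over the tokens, and reset() restarts tokenization from the beginning of the string.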