static IEnumerable<T> FindUniqueNumbersInCollection<T>(ICollection<T> value)
{
    Dictionary<T, byte> hash = new Dictionary<T, byte>();
    foreach (T val in value)
    {
        if (hash.ContainsKey(val)) { hash[val] = 1; continue; }
        hash.Add(val, 0);
    }
    List<T> unique = new List<T>();
    foreach (KeyValuePair<T, byte> kvp in hash)
    {
        if (kvp.Value == 0) unique.Add(kvp.Key);
    }
    return unique;
}
Edit:
I think this is finally correct. :)
var dict = new Dictionary<T, bool>();
foreach (var v in value)
    dict[v] = dict.ContainsKey(v);
foreach (var pair in dict)
    if (!pair.Value)
        yield return pair.Key; // Algorithmically, as fast as possible
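For context, here's how that snippet could sit inside a complete iterator method (FindUnique is just an illustrative name):

static IEnumerable<T> FindUnique<T>(ICollection<T> value)
{
    // A value of true means "seen more than once".
    var dict = new Dictionary<T, bool>();
    foreach (var v in value)
        dict[v] = dict.ContainsKey(v);
    foreach (var pair in dict)
        if (!pair.Value)
            yield return pair.Key;
}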
Or if you'd like some LINQ:
var dict = new Dictionary<T, bool>();
foreach (var v in value)
    dict[v] = dict.ContainsKey(v);
return dict.Keys.Where(k => !dict[k]); // *COULD* be slower on lots of collisions
Or even
var dict = new Dictionary<T, bool>();
foreach (var v in value)
    dict[v] = dict.ContainsKey(v);
return dict.Where(p => !p.Value).Select(p => p.Key); // "Fastest".. but not really
I wouldn't say it's "more efficient" than yours (it's really not -- they're pretty much the same), but it's definitely shorter.
If you want overkill efficiency (at the cost of accuracy), you can always use a Bloom Filter!
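If you did go that route, here's a rough sketch of the idea; the BloomFilter class, its sizing, and FindLikelyUnique are purely illustrative, and false positives mean some genuinely unique elements can be silently dropped:

using System.Collections;
using System.Collections.Generic;

// Minimal Bloom filter sketch; all parameters here are illustrative.
class BloomFilter<T>
{
    private readonly BitArray _bits;
    private readonly int _hashCount;

    public BloomFilter(int sizeInBits, int hashCount)
    {
        _bits = new BitArray(sizeInBits);
        _hashCount = hashCount;
    }

    public void Add(T item)
    {
        foreach (int i in Indexes(item)) _bits[i] = true;
    }

    public bool MightContain(T item)
    {
        foreach (int i in Indexes(item))
            if (!_bits[i]) return false;
        return true; // can be a false positive
    }

    // Double hashing: derive several bit positions from two hash values.
    private IEnumerable<int> Indexes(T item)
    {
        int h1 = item.GetHashCode();
        int h2 = unchecked((h1 * 31) ^ (h1 >> 16)); // crude second hash, illustration only
        for (int i = 0; i < _hashCount; i++)
            yield return (int)((uint)unchecked(h1 + i * h2) % (uint)_bits.Length);
    }
}

static IEnumerable<T> FindLikelyUnique<T>(ICollection<T> value)
{
    var seenOnce = new BloomFilter<T>(1 << 16, 4);
    var seenAgain = new BloomFilter<T>(1 << 16, 4);

    foreach (T v in value)
    {
        if (seenOnce.MightContain(v)) seenAgain.Add(v);
        else seenOnce.Add(v);
    }

    // Yield elements that were (probably) never seen a second time.
    foreach (T v in value)
        if (!seenAgain.MightContain(v))
            yield return v;
}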
Since you're returning an IEnumerable<T>, you could
return hash.Where(kvp => kvp.Value == 0).Select(kvp => kvp.Key);
instead of the second loop to perform that iteration lazily.
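Put together with the method from the question, it would look something like this (assuming using System.Linq; is in scope):

static IEnumerable<T> FindUniqueNumbersInCollection<T>(ICollection<T> value)
{
    Dictionary<T, byte> hash = new Dictionary<T, byte>();
    foreach (T val in value)
    {
        if (hash.ContainsKey(val)) { hash[val] = 1; continue; }
        hash.Add(val, 0);
    }
    // Evaluated lazily, only when the caller enumerates the result.
    return hash.Where(kvp => kvp.Value == 0).Select(kvp => kvp.Key);
}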
The following code will do this (in one statement).
var unique = new[] { 1, 1, 2, 3, 3, 4 }
.GroupBy(x => x) // groups the elements using the element value as the key
.Where(x => x.Count() == 1) // restricts to only those groups having a count of 1
.Select(x => x.Key); // selects the key from each of those groups
(You will need to have using System.Linq; in your code file.)
Although your ultimate goal is efficiency, I would use the simplest code possible (which I think this is) and then optimise when the profiler tells me to.
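If you end up reusing the query, it could also be wrapped in a small extension method (WhereUnique is just a suggested name):

using System.Collections.Generic;
using System.Linq;

static class EnumerableExtensions
{
    // Returns the elements that occur exactly once in the source sequence.
    public static IEnumerable<T> WhereUnique<T>(this IEnumerable<T> source)
    {
        return source.GroupBy(x => x)
                     .Where(g => g.Count() == 1)
                     .Select(g => g.Key);
    }
}

// Usage: yields 2 and 4.
// var unique = new[] { 1, 1, 2, 3, 3, 4 }.WhereUnique();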