Problem: Design a data structure that supports all following operations in average O(1) time.
- insert(val): Inserts an item val to the collection.
- remove(val): Removes an item val from the collection if present.
- getRandom: Returns a random element from the current collection of elements. The probability of each element being returned is proportional to the number of occurrences of that value in the collection.
Note: Duplicate elements are allowed.
Approach: This problem is very similar to Design Data Structure with Insert, Delete, GetRandom at O(1) without duplicates. To handle duplicates, the dictionary maps each value to a *set* of list indices (instead of a single index). Insert appends to the list and records the new index; Remove takes any one index for the value, swaps the last list element into that slot, and fixes up the moved element's index set.
Implementation in C#:
public class RandomizedCollection
{
    // Maps each value to the set of indices in 'list' where it occurs.
    // A set (rather than a single index) is what allows duplicates.
    private readonly Dictionary<int, HashSet<int>> hash;
    // Flat storage of every element (with duplicates); a random index into
    // this list naturally weights each value by its occurrence count.
    private readonly List<int> list;
    // Shared RNG: creating 'new Random()' per call can reuse the same
    // time-based seed and return identical values on rapid successive calls.
    private static readonly Random random = new Random();

    public RandomizedCollection()
    {
        this.hash = new Dictionary<int, HashSet<int>>();
        this.list = new List<int>();
    }

    /// <summary>
    /// Inserts <paramref name="val"/> into the collection.
    /// </summary>
    /// <returns>true if the collection did not already contain the value.</returns>
    public bool Insert(int val)
    {
        // Single lookup: fetch the index set if present, create it otherwise.
        bool isExist = this.hash.TryGetValue(val, out HashSet<int> indices);
        if (!isExist)
        {
            indices = new HashSet<int>();
            this.hash.Add(val, indices);
        }
        this.list.Add(val);
        indices.Add(this.list.Count - 1);
        return !isExist;
    }

    /// <summary>
    /// Removes one occurrence of <paramref name="val"/> if present.
    /// </summary>
    /// <returns>true if an occurrence was removed; false if the value is absent.</returns>
    public bool Remove(int val)
    {
        if (!this.hash.TryGetValue(val, out HashSet<int> indices))
        {
            return false;
        }
        // Take any one index occupied by val and drop it from the index set.
        int index = indices.First();
        indices.Remove(index);
        if (indices.Count == 0)
        {
            this.hash.Remove(val);
        }
        // O(1) list removal: overwrite the vacated slot with the last
        // element, then trim the tail.
        int lastIndex = this.list.Count - 1;
        this.list[index] = this.list[lastIndex];
        this.list.RemoveAt(lastIndex);
        // If an element was actually moved (the removed slot was not the
        // tail), update the moved element's recorded index.
        if (index != lastIndex)
        {
            HashSet<int> movedIndices = this.hash[this.list[index]];
            movedIndices.Remove(lastIndex);
            movedIndices.Add(index);
        }
        return true;
    }

    /// <summary>
    /// Returns a uniformly random element; each value's selection
    /// probability is proportional to how many times it occurs.
    /// </summary>
    public int GetRandom()
    {
        // Next(count) is uniform over [0, count) with no modulo bias.
        return this.list[random.Next(this.list.Count)];
    }
}
Complexity: average O(1) for all the methods (dictionary and HashSet operations are O(1) on average; the swap-with-last trick keeps list removal O(1)).
No comments:
Post a Comment